diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Refresh Button.png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Refresh Button.png new file mode 100644 index 0000000..12cbbcc Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Refresh Button.png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1515).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1515).png new file mode 100644 index 0000000..b4148e5 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1515).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1516).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1516).png new file mode 100644 index 0000000..fb6c2c3 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1516).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1517).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1517).png new file mode 100644 index 0000000..91cd884 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1517).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1518).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1518).png new file mode 100644 index 0000000..fbc2f39 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1518).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1519).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1519).png new file mode 100644 index 0000000..1b8a6ec Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1519).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1520).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1520).png new file mode 100644 index 0000000..771bb98 Binary files /dev/null and 
b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1520).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1549).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1549).png new file mode 100644 index 0000000..f4fcb9d Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1549).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1551).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1551).png new file mode 100644 index 0000000..d78bb1a Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1551).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1552).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1552).png new file mode 100644 index 0000000..b2150bd Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1552).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1553).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1553).png new file mode 100644 index 0000000..888cd03 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1553).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1554).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1554).png new file mode 100644 index 0000000..b6362ab Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1554).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1555).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1555).png new file mode 100644 index 0000000..a603b01 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1555).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1556).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot 
(1556).png new file mode 100644 index 0000000..e05329b Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1556).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1557).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1557).png new file mode 100644 index 0000000..ca38513 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1557).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1558).png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1558).png new file mode 100644 index 0000000..030e0a1 Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/Screenshot (1558).png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/img1.png b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/img1.png new file mode 100644 index 0000000..1f092fe Binary files /dev/null and b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Images/img1.png differ diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/Langlow NoCode JSON File LAB.json b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Langlow NoCode JSON File LAB.json new file mode 100644 index 0000000..b93150e --- /dev/null +++ b/Class-Labs/Lab-7(Langflow-Nocodetoll)/Langlow NoCode JSON File LAB.json @@ -0,0 +1,3539 @@ +{ + "id": "fc821cf8-ef2e-43f2-93ee-83b512f4d38b", + "data": { + "nodes": [ + { + "data": { + "description": "Create a prompt template with dynamic variables.", + "display_name": "Prompt", + "id": "Prompt-GSe05", + "node": { + "template": { + "_type": "Component", + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.prompts.api_utils import process_prompt_template\nfrom langflow.custom import Component\nfrom langflow.inputs.inputs import DefaultPromptField\nfrom langflow.io import MessageTextInput, Output, PromptInput\nfrom langflow.schema.message import Message\nfrom 
langflow.template.utils import update_template_values\n\n\nclass PromptComponent(Component):\n display_name: str = \"Prompt\"\n description: str = \"Create a prompt template with dynamic variables.\"\n icon = \"prompts\"\n trace_type = \"prompt\"\n name = \"Prompt\"\n\n inputs = [\n PromptInput(name=\"template\", display_name=\"Template\"),\n MessageTextInput(\n name=\"tool_placeholder\",\n display_name=\"Tool Placeholder\",\n tool_mode=True,\n advanced=True,\n info=\"A placeholder input for tool mode.\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Prompt Message\", name=\"prompt\", method=\"build_prompt\"),\n ]\n\n async def build_prompt(self) -> Message:\n prompt = Message.from_template(**self._attributes)\n self.status = prompt.text\n return prompt\n\n def _update_template(self, frontend_node: dict):\n prompt_template = frontend_node[\"template\"][\"template\"][\"value\"]\n custom_fields = frontend_node[\"custom_fields\"]\n frontend_node_template = frontend_node[\"template\"]\n _ = process_prompt_template(\n template=prompt_template,\n name=\"template\",\n custom_fields=custom_fields,\n frontend_node_template=frontend_node_template,\n )\n return frontend_node\n\n async def update_frontend_node(self, new_frontend_node: dict, current_frontend_node: dict):\n \"\"\"This function is called after the code validation is done.\"\"\"\n frontend_node = await super().update_frontend_node(new_frontend_node, current_frontend_node)\n template = frontend_node[\"template\"][\"template\"][\"value\"]\n # Kept it duplicated for backwards compatibility\n _ = process_prompt_template(\n template=template,\n name=\"template\",\n custom_fields=frontend_node[\"custom_fields\"],\n frontend_node_template=frontend_node[\"template\"],\n )\n # Now that template is updated, we need to grab any values that were set in the current_frontend_node\n # and update the frontend_node with those values\n update_template_values(new_template=frontend_node, 
previous_template=current_frontend_node[\"template\"])\n return frontend_node\n\n def _get_fallback_input(self, **kwargs):\n return DefaultPromptField(**kwargs)\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "template": { + "tool_mode": false, + "trace_as_input": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "template", + "value": "Reference 1:\n\n{references}\n\n---\n\n{instructions}\n\nBlog: \n\n", + "display_name": "Template", + "advanced": false, + "dynamic": false, + "info": "", + "title_case": false, + "type": "prompt", + "_input_type": "PromptInput", + "load_from_db": false + }, + "tool_placeholder": { + "tool_mode": true, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "tool_placeholder", + "value": "", + "display_name": "Tool Placeholder", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "A placeholder input for tool mode.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "references": { + "field_type": "str", + "required": false, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "", + "fileTypes": [], + "file_path": "", + "name": "references", + "display_name": "references", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "load_from_db": false, + "title_case": false, + "type": "str" + }, + "instructions": { + "field_type": "str", + "required": false, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "", + "fileTypes": [], + "file_path": "", + "name": "instructions", + "display_name": "instructions", + 
"advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "load_from_db": false, + "title_case": false, + "type": "str" + } + }, + "description": "Create a prompt template with dynamic variables.", + "icon": "prompts", + "base_classes": [ + "Message" + ], + "display_name": "Prompt", + "documentation": "", + "minimized": false, + "custom_fields": { + "template": [ + "references", + "instructions" + ] + }, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "prompt", + "hidden": null, + "display_name": "Prompt Message", + "method": "build_prompt", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "template", + "tool_placeholder" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" + }, + "type": "Prompt" + }, + "dragging": false, + "height": 431, + "id": "Prompt-GSe05", + "position": { + "x": 1187.3042451245844, + "y": 573.4297367698657 + }, + "positionAbsolute": { + "x": 1341.1018009526915, + "y": 456.4098573354365 + }, + "selected": false, + "type": "genericNode", + "width": 320, + "measured": { + "width": 320, + "height": 431 + } + }, + { + "data": { + "description": "Get text inputs from the Playground.", + "display_name": "Instructions", + "id": "TextInput-pqkFF", + "node": { + "template": { + "_type": "Component", + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.io.text import TextComponent\nfrom langflow.io import MultilineInput, Output\nfrom langflow.schema.message import Message\n\n\nclass TextInputComponent(TextComponent):\n display_name = \"Text Input\"\n description = \"Get text inputs from the Playground.\"\n icon = \"type\"\n name 
= \"TextInput\"\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Text to be passed as input.\",\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"text\", method=\"text_response\"),\n ]\n\n def text_response(self) -> Message:\n return Message(\n text=self.input_value,\n )\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "input_value": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "\"Write a detailed, human-like article in the format by analyzing the {reference} data:\n\nHeadline: Use a question or statement to evoke curiosity and clearly convey the topic.\nIntroduction (What): Briefly introduce the main idea, highlighting key shifts or trends. Use at least one data point or fact to establish credibility.\nSupporting Evidence: Provide concrete statistics or examples to support the claims made in the introduction. This section should be analytical and data-driven.\nAnalysis (So What): Explain why the trends or changes are important. Discuss their implications for businesses, industries, or individuals. Use logical transitions to guide the reader.\nActionable Insights (Now What): Offer practical advice or steps to prepare for or leverage the trends. 
Use bullet points for clarity and focus on actionable language.\nCall to Action: Conclude with an aspirational note, emphasizing long-term benefits and motivating readers to take action.\nTone:\n\nEngaging and slightly informal.\nData-driven where necessary.\nDirective and motivational in the actionable insights section.\nTopic: [Insert your topic here, e.g., 'The Role of Blockchain in Future Supply Chain Management.']\"\n\nNote : The article should be something like reader love to read it not be boring \n\n", + "display_name": "Text", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Text to be passed as input.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + } + }, + "description": "Get text inputs from the Playground.", + "icon": "type", + "base_classes": [ + "Message" + ], + "display_name": "Text Input", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text", + "hidden": null, + "display_name": "Message", + "method": "text_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" + }, + "type": "TextInput" + }, + "dragging": false, + "height": 233, + "id": "TextInput-pqkFF", + "position": { + "x": 676.39889111216, + "y": 391.53064719037116 + }, + "positionAbsolute": { + "x": 955.8314364398983, + "y": 402.24423846638155 + }, + "selected": false, + "type": "genericNode", + "width": 320, + "measured": { + "width": 320, + "height": 233 + } + }, + { + "data": { + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "id": 
"ChatOutput-kTP2v", + "node": { + "template": { + "_type": "Component", + "background_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "background_color", + "value": "", + "display_name": "Background Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The background color of the icon.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "chat_icon": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "chat_icon", + "value": "", + "display_name": "Icon", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The icon of the message.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store 
Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value 
if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "data_template": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "data_template", + "value": "{text}", + "display_name": "Data Template", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Text", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Message to be passed as output.", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + }, + "sender": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Machine", + "User" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "sender", + "value": "Machine", + "display_name": "Sender Type", + "advanced": true, + "dynamic": false, + "info": "Type of sender.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "sender_name": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "sender_name", + "value": "AI", + "display_name": "Sender Name", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Name of the sender.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "session_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "session_id", + "value": "", + "display_name": "Session ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The session ID of the 
chat. If empty, the current session ID parameter will be used.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "should_store_message": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "should_store_message", + "value": true, + "display_name": "Store Messages", + "advanced": true, + "dynamic": false, + "info": "Store the message in the history.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "text_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "text_color", + "value": "", + "display_name": "Text Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The text color of the name", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Display a chat message in the Playground.", + "icon": "MessagesSquare", + "base_classes": [ + "Message" + ], + "display_name": "Chat Output", + "documentation": "", + "minimized": true, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "message", + "hidden": null, + "display_name": "Message", + "method": "message_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "data_template", + "background_color", + "chat_icon", + "text_color" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" 
+ }, + "type": "ChatOutput" + }, + "dragging": false, + "height": 233, + "id": "ChatOutput-kTP2v", + "position": { + "x": 2113.228183852361, + "y": 594.6116538574528 + }, + "positionAbsolute": { + "x": 2113.228183852361, + "y": 594.6116538574528 + }, + "selected": false, + "type": "genericNode", + "width": 320, + "measured": { + "width": 320, + "height": 233 + } + }, + { + "data": { + "description": "Generates text using OpenAI LLMs.", + "display_name": "OpenAI", + "id": "OpenAIModel-NT3Gk", + "node": { + "template": { + "_type": "Component", + "api_key": { + "load_from_db": false, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "", + "display_name": "OpenAI API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The OpenAI API Key to use for the OpenAI model.", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=2, step=0.01)\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key).get_secret_value() if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if 
json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Input", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + }, + "json_mode": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "json_mode", + "value": false, + "display_name": "JSON Mode", + "advanced": true, + "dynamic": false, + "info": "If True, it will output JSON regardless of passing a schema.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "max_tokens": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 128000, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "max_tokens", + "value": "", + "display_name": "Max Tokens", + "advanced": true, + "dynamic": false, + "info": "The 
maximum number of tokens to generate. Set to 0 for unlimited tokens.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "model_kwargs": { + "tool_mode": false, + "trace_as_input": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "model_kwargs", + "value": {}, + "display_name": "Model Kwargs", + "advanced": true, + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "title_case": false, + "type": "dict", + "_input_type": "DictInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "model_name", + "value": "gpt-4o", + "display_name": "Model Name", + "advanced": false, + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput", + "load_from_db": false + }, + "openai_api_base": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "openai_api_base", + "value": "", + "display_name": "OpenAI API Base", + "advanced": true, + "dynamic": false, + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "seed": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "seed", + "value": 1, + "display_name": "Seed", + "advanced": true, + "dynamic": false, + "info": "The seed controls the reproducibility of the job.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "stream": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stream", + "value": false, + "display_name": "Stream", + "advanced": false, + "dynamic": false, + "info": "Stream the response from the model. Streaming works only in Chat.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "system_message": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system_message", + "value": "", + "display_name": "System Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System message to pass to the model.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "temperature": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0.1, + "display_name": "Temperature", + "advanced": false, + "dynamic": false, + "info": 
"", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + } + }, + "description": "Generates text using OpenAI LLMs.", + "icon": "OpenAI", + "base_classes": [ + "LanguageModel", + "Message" + ], + "display_name": "OpenAI", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text_output", + "hidden": null, + "display_name": "Message", + "method": "text_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": [], + "allows_loop": false, + "tool_mode": true + }, + { + "types": [ + "LanguageModel" + ], + "selected": "LanguageModel", + "name": "model_output", + "hidden": null, + "display_name": "Language Model", + "method": "build_model", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": [ + "api_key" + ], + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "system_message", + "stream", + "max_tokens", + "model_kwargs", + "json_mode", + "model_name", + "openai_api_base", + "api_key", + "temperature", + "seed" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" + }, + "type": "OpenAIModel" + }, + "dragging": false, + "height": 669, + "id": "OpenAIModel-NT3Gk", + "position": { + "x": 1713.1213335516065, + "y": 456.3085334094866 + }, + "positionAbsolute": { + "x": 1713.1213335516065, + "y": 456.3085334094866 + }, + "selected": false, + "type": "genericNode", + "width": 320, + "measured": { + "width": 320, + "height": 669 + } + }, + { + "id": "Agent-VX80X", + "type": "genericNode", + "position": { + "x": -778.2899611808947, + "y": 490.9266186549311 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "memory": { + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + 
"required": false, + "placeholder": "", + "show": true, + "name": "memory", + "value": "", + "display_name": "External Memory", + "advanced": true, + "input_types": [ + "Memory" + ], + "dynamic": false, + "info": "Retrieve messages from an external memory. If empty, it will use the Langflow tables.", + "title_case": false, + "type": "other", + "_input_type": "HandleInput" + }, + "tools": { + "trace_as_metadata": true, + "list": true, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "tools", + "value": "", + "display_name": "Tools", + "advanced": false, + "input_types": [ + "Tool" + ], + "dynamic": false, + "info": "These are the tools that the agent can use to help with tasks.", + "title_case": false, + "type": "other", + "_input_type": "HandleInput" + }, + "add_current_date_tool": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "add_current_date_tool", + "value": true, + "display_name": "Current Date", + "advanced": true, + "dynamic": false, + "info": "If true, will add a tool to the agent that returns the current date.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "agent_description": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "agent_description", + "value": "A helpful assistant with access to the following tools:", + "display_name": "Agent Description [Deprecated]", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The description of the agent. This is only used when in Tool Mode. Defaults to 'A helpful assistant with access to the following tools:' and tools are added dynamically. 
This feature is deprecated and will be removed in future versions.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "agent_llm": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Amazon Bedrock", + "Anthropic", + "Azure OpenAI", + "Google Generative AI", + "Groq", + "NVIDIA", + "OpenAI", + "Custom" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "agent_llm", + "value": "OpenAI", + "display_name": "Model Provider", + "advanced": false, + "input_types": [], + "dynamic": false, + "info": "The provider of the language model that the agent will use to generate responses.", + "real_time_refresh": true, + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "api_key": { + "load_from_db": false, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "", + "display_name": "OpenAI API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The OpenAI API Key to use for the OpenAI model.", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langchain_core.tools import StructuredTool\n\nfrom langflow.base.agents.agent import LCToolsAgentComponent\nfrom langflow.base.models.model_input_constants import (\n ALL_PROVIDER_FIELDS,\n MODEL_DYNAMIC_UPDATE_FIELDS,\n MODEL_PROVIDERS_DICT,\n)\nfrom langflow.base.models.model_utils import get_model_name\nfrom langflow.components.helpers import CurrentDateComponent\nfrom langflow.components.helpers.memory import MemoryComponent\nfrom langflow.components.langchain_utilities.tool_calling import ToolCallingAgentComponent\nfrom langflow.custom.custom_component.component import _get_component_toolkit\nfrom 
langflow.custom.utils import update_component_build_config\nfrom langflow.field_typing import Tool\nfrom langflow.io import BoolInput, DropdownInput, MultilineInput, Output\nfrom langflow.logging import logger\nfrom langflow.schema.dotdict import dotdict\nfrom langflow.schema.message import Message\n\n\ndef set_advanced_true(component_input):\n component_input.advanced = True\n return component_input\n\n\nclass AgentComponent(ToolCallingAgentComponent):\n display_name: str = \"Agent\"\n description: str = \"Define the agent's instructions, then enter a task to complete using tools.\"\n icon = \"bot\"\n beta = False\n name = \"Agent\"\n\n memory_inputs = [set_advanced_true(component_input) for component_input in MemoryComponent().inputs]\n\n inputs = [\n DropdownInput(\n name=\"agent_llm\",\n display_name=\"Model Provider\",\n info=\"The provider of the language model that the agent will use to generate responses.\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"OpenAI\",\n real_time_refresh=True,\n input_types=[],\n ),\n *MODEL_PROVIDERS_DICT[\"OpenAI\"][\"inputs\"],\n MultilineInput(\n name=\"system_prompt\",\n display_name=\"Agent Instructions\",\n info=\"System Prompt: Initial instructions and context provided to guide the agent's behavior.\",\n value=\"You are a helpful assistant that can use tools to answer questions and perform tasks.\",\n advanced=False,\n ),\n *LCToolsAgentComponent._base_inputs,\n *memory_inputs,\n BoolInput(\n name=\"add_current_date_tool\",\n display_name=\"Current Date\",\n advanced=True,\n info=\"If true, will add a tool to the agent that returns the current date.\",\n value=True,\n ),\n ]\n outputs = [Output(name=\"response\", display_name=\"Response\", method=\"message_response\")]\n\n async def message_response(self) -> Message:\n try:\n llm_model, display_name = self.get_llm()\n if llm_model is None:\n msg = \"No language model selected\"\n raise ValueError(msg)\n self.model_name = 
get_model_name(llm_model, display_name=display_name)\n except Exception as e:\n # Log the error for debugging purposes\n logger.error(f\"Error retrieving language model: {e}\")\n raise\n\n try:\n self.chat_history = await self.get_memory_data()\n except Exception as e:\n logger.error(f\"Error retrieving chat history: {e}\")\n raise\n\n if self.add_current_date_tool:\n try:\n if not isinstance(self.tools, list): # type: ignore[has-type]\n self.tools = []\n # Convert CurrentDateComponent to a StructuredTool\n current_date_tool = (await CurrentDateComponent(**self.get_base_args()).to_toolkit()).pop(0)\n if isinstance(current_date_tool, StructuredTool):\n self.tools.append(current_date_tool)\n else:\n msg = \"CurrentDateComponent must be converted to a StructuredTool\"\n raise TypeError(msg)\n except Exception as e:\n logger.error(f\"Error adding current date tool: {e}\")\n raise\n\n if not self.tools:\n msg = \"Tools are required to run the agent.\"\n logger.error(msg)\n raise ValueError(msg)\n\n try:\n self.set(\n llm=llm_model,\n tools=self.tools,\n chat_history=self.chat_history,\n input_value=self.input_value,\n system_prompt=self.system_prompt,\n )\n agent = self.create_agent_runnable()\n except Exception as e:\n logger.error(f\"Error setting up the agent: {e}\")\n raise\n\n return await self.run_agent(agent)\n\n async def get_memory_data(self):\n memory_kwargs = {\n component_input.name: getattr(self, f\"{component_input.name}\") for component_input in self.memory_inputs\n }\n # filter out empty values\n memory_kwargs = {k: v for k, v in memory_kwargs.items() if v}\n\n return await MemoryComponent(**self.get_base_args()).set(**memory_kwargs).retrieve_messages()\n\n def get_llm(self):\n if isinstance(self.agent_llm, str):\n try:\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n display_name = component_class.display_name\n inputs = provider_info.get(\"inputs\")\n prefix = 
provider_info.get(\"prefix\", \"\")\n return (\n self._build_llm_model(component_class, inputs, prefix),\n display_name,\n )\n except Exception as e:\n msg = f\"Error building {self.agent_llm} language model\"\n raise ValueError(msg) from e\n return self.agent_llm, None\n\n def _build_llm_model(self, component, inputs, prefix=\"\"):\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n return component.set(**model_kwargs).build_model()\n\n def set_component_params(self, component):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n inputs = provider_info.get(\"inputs\")\n prefix = provider_info.get(\"prefix\")\n model_kwargs = {input_.name: getattr(self, f\"{prefix}{input_.name}\") for input_ in inputs}\n\n return component.set(**model_kwargs)\n return component\n\n def delete_fields(self, build_config: dotdict, fields: dict | list[str]) -> None:\n \"\"\"Delete specified fields from build_config.\"\"\"\n for field in fields:\n build_config.pop(field, None)\n\n def update_input_types(self, build_config: dotdict) -> dotdict:\n \"\"\"Update input types for all fields in build_config.\"\"\"\n for key, value in build_config.items():\n if isinstance(value, dict):\n if value.get(\"input_types\") is None:\n build_config[key][\"input_types\"] = []\n elif hasattr(value, \"input_types\") and value.input_types is None:\n value.input_types = []\n return build_config\n\n async def update_build_config(\n self, build_config: dotdict, field_value: str, field_name: str | None = None\n ) -> dotdict:\n # Iterate over all providers in the MODEL_PROVIDERS_DICT\n # Existing logic for updating build_config\n if field_name in (\"agent_llm\",):\n build_config[\"agent_llm\"][\"value\"] = field_value\n provider_info = MODEL_PROVIDERS_DICT.get(field_value)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call the 
component class's update_build_config method\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n\n provider_configs: dict[str, tuple[dict, list[dict]]] = {\n provider: (\n MODEL_PROVIDERS_DICT[provider][\"fields\"],\n [\n MODEL_PROVIDERS_DICT[other_provider][\"fields\"]\n for other_provider in MODEL_PROVIDERS_DICT\n if other_provider != provider\n ],\n )\n for provider in MODEL_PROVIDERS_DICT\n }\n if field_value in provider_configs:\n fields_to_add, fields_to_delete = provider_configs[field_value]\n\n # Delete fields from other providers\n for fields in fields_to_delete:\n self.delete_fields(build_config, fields)\n\n # Add provider-specific fields\n if field_value == \"OpenAI\" and not any(field in build_config for field in fields_to_add):\n build_config.update(fields_to_add)\n else:\n build_config.update(fields_to_add)\n # Reset input types for agent_llm\n build_config[\"agent_llm\"][\"input_types\"] = []\n elif field_value == \"Custom\":\n # Delete all provider fields\n self.delete_fields(build_config, ALL_PROVIDER_FIELDS)\n # Update with custom component\n custom_component = DropdownInput(\n name=\"agent_llm\",\n display_name=\"Language Model\",\n options=[*sorted(MODEL_PROVIDERS_DICT.keys()), \"Custom\"],\n value=\"Custom\",\n real_time_refresh=True,\n input_types=[\"LanguageModel\"],\n )\n build_config.update({\"agent_llm\": custom_component.to_dict()})\n # Update input types for all fields\n build_config = self.update_input_types(build_config)\n\n # Validate required keys\n default_keys = [\n \"code\",\n \"_type\",\n \"agent_llm\",\n \"tools\",\n \"input_value\",\n \"add_current_date_tool\",\n \"system_prompt\",\n \"agent_description\",\n \"max_iterations\",\n \"handle_parsing_errors\",\n \"verbose\",\n ]\n missing_keys = [key for key in default_keys if key not in build_config]\n if missing_keys:\n msg = f\"Missing required keys in build_config: {missing_keys}\"\n raise ValueError(msg)\n if 
(\n isinstance(self.agent_llm, str)\n and self.agent_llm in MODEL_PROVIDERS_DICT\n and field_name in MODEL_DYNAMIC_UPDATE_FIELDS\n ):\n provider_info = MODEL_PROVIDERS_DICT.get(self.agent_llm)\n if provider_info:\n component_class = provider_info.get(\"component_class\")\n component_class = self.set_component_params(component_class)\n prefix = provider_info.get(\"prefix\")\n if component_class and hasattr(component_class, \"update_build_config\"):\n # Call each component class's update_build_config method\n # remove the prefix from the field_name\n if isinstance(field_name, str) and isinstance(prefix, str):\n field_name = field_name.replace(prefix, \"\")\n build_config = await update_component_build_config(\n component_class, build_config, field_value, \"model_name\"\n )\n return dotdict({k: v.to_dict() if hasattr(v, \"to_dict\") else v for k, v in build_config.items()})\n\n async def to_toolkit(self) -> list[Tool]:\n component_toolkit = _get_component_toolkit()\n tools_names = self._build_tools_names()\n agent_description = self.get_tool_description()\n # TODO: Agent Description Depreciated Feature to be removed\n description = f\"{agent_description}{tools_names}\"\n tools = component_toolkit(component=self).get_tools(\n tool_name=self.get_tool_name(), tool_description=description, callbacks=self.get_langchain_callbacks()\n )\n if hasattr(self, \"tools_metadata\"):\n tools = component_toolkit(component=self, metadata=self.tools_metadata).update_tools_metadata(tools=tools)\n return tools\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "handle_parsing_errors": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "handle_parsing_errors", + "value": true, + "display_name": "Handle Parse Errors", + "advanced": true, 
+ "dynamic": false, + "info": "Should the Agent fix errors when reading user input for better processing?", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "input_value": { + "tool_mode": true, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Input", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The input provided by the user for the agent to process.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "json_mode": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "json_mode", + "value": false, + "display_name": "JSON Mode", + "advanced": true, + "dynamic": false, + "info": "If True, it will output JSON regardless of passing a schema.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "max_iterations": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "max_iterations", + "value": 15, + "display_name": "Max Iterations", + "advanced": true, + "dynamic": false, + "info": "The maximum number of attempts the agent can make to complete its task before it stops.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "max_tokens": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 128000, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "max_tokens", + "value": "", + "display_name": "Max Tokens", + "advanced": true, + "dynamic": false, + 
"info": "The maximum number of tokens to generate. Set to 0 for unlimited tokens.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "model_kwargs": { + "tool_mode": false, + "trace_as_input": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "model_kwargs", + "value": {}, + "display_name": "Model Kwargs", + "advanced": true, + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "title_case": false, + "type": "dict", + "_input_type": "DictInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "options_metadata": [], + "combobox": true, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "model_name", + "value": "gpt-4o-mini", + "display_name": "Model Name", + "advanced": false, + "dynamic": false, + "info": "To see the model names, first choose a provider. 
Then, enter your API key and click the refresh button next to the model name.", + "real_time_refresh": false, + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "n_messages": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "n_messages", + "value": 100, + "display_name": "Number of Messages", + "advanced": true, + "dynamic": false, + "info": "Number of messages to retrieve.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "openai_api_base": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "openai_api_base", + "value": "", + "display_name": "OpenAI API Base", + "advanced": true, + "dynamic": false, + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "order": { + "tool_mode": true, + "trace_as_metadata": true, + "options": [ + "Ascending", + "Descending" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "order", + "value": "Ascending", + "display_name": "Order", + "advanced": true, + "dynamic": false, + "info": "Order of the messages.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "seed": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "seed", + "value": 1, + "display_name": "Seed", + "advanced": true, + "dynamic": false, + "info": "The seed controls the reproducibility of the job.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "sender": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Machine", + "User", + "Machine and User" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "sender", + "value": "Machine and User", + "display_name": "Sender Type", + "advanced": true, + "dynamic": false, + "info": "Filter by sender type.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "sender_name": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "sender_name", + "value": "", + "display_name": "Sender Name", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Filter by sender name.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" 
+ }, + "session_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "session_id", + "value": "", + "display_name": "Session ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The session ID of the chat. If empty, the current session ID parameter will be used.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "system_prompt": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system_prompt", + "value": "You are a helpful assistant that can use tools to answer questions and perform tasks.", + "display_name": "Agent Instructions", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System Prompt: Initial instructions and context provided to guide the agent's behavior.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "temperature": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0.1, + "display_name": "Temperature", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + }, + "template": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": 
false, + "placeholder": "", + "show": true, + "name": "template", + "value": "{sender_name}: {text}", + "display_name": "Template", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The template to use for formatting the data. It can contain the keys {text}, {sender} or any other key in the message data.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "verbose": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "verbose", + "value": true, + "display_name": "Verbose", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + } + }, + "description": "Define the agent's instructions, then enter a task to complete using tools.", + "icon": "bot", + "base_classes": [ + "Message" + ], + "display_name": "Agent", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "response", + "hidden": null, + "display_name": "Response", + "method": "message_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "agent_llm", + "max_tokens", + "model_kwargs", + "json_mode", + "model_name", + "openai_api_base", + "api_key", + "temperature", + "seed", + "system_prompt", + "tools", + "input_value", + "handle_parsing_errors", + "verbose", + "max_iterations", + "agent_description", + "memory", + "sender", + "sender_name", + "n_messages", + "session_id", + "order", + "template", + "add_current_date_tool" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" + }, + "type": "Agent", + 
"id": "Agent-VX80X" + }, + "selected": false, + "width": 320, + "height": 645, + "positionAbsolute": { + "x": -758.5154318692233, + "y": 946.8021376995628 + }, + "dragging": false, + "measured": { + "width": 320, + "height": 645 + } + }, + { + "id": "ChatInput-1OpYH", + "type": "genericNode", + "position": { + "x": -1338.346742659103, + "y": 1225.6373656040714 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "files": { + "trace_as_metadata": true, + "file_path": "", + "fileTypes": [ + "txt", + "md", + "mdx", + "csv", + "json", + "yaml", + "yml", + "xml", + "html", + "htm", + "pdf", + "docx", + "py", + "sh", + "sql", + "js", + "ts", + "tsx", + "jpg", + "jpeg", + "png", + "bmp", + "image" + ], + "list": true, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "files", + "value": "", + "display_name": "Files", + "advanced": true, + "dynamic": false, + "info": "Files to be sent with the message.", + "title_case": false, + "type": "file", + "_input_type": "FileInput" + }, + "background_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "background_color", + "value": "", + "display_name": "Background Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The background color of the icon.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "chat_icon": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "chat_icon", + "value": "", + "display_name": "Icon", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The icon of the message.", + "title_case": false, 
+ "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.data.utils import IMG_FILE_TYPES, TEXT_FILE_TYPES\nfrom langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import (\n DropdownInput,\n FileInput,\n MessageTextInput,\n MultilineInput,\n Output,\n)\nfrom langflow.schema.message import Message\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_USER,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatInput(ChatComponent):\n display_name = \"Chat Input\"\n description = \"Get chat inputs from the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatInput\"\n minimized = True\n\n inputs = [\n MultilineInput(\n name=\"input_value\",\n display_name=\"Text\",\n value=\"\",\n info=\"Message to be passed as input.\",\n input_types=[],\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_USER,\n info=\"Type of sender.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_USER,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. 
If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n FileInput(\n name=\"files\",\n display_name=\"Files\",\n file_types=TEXT_FILE_TYPES + IMG_FILE_TYPES,\n info=\"Files to be sent with the message.\",\n advanced=True,\n is_list=True,\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(display_name=\"Message\", name=\"message\", method=\"message_response\"),\n ]\n\n async def message_response(self) -> Message:\n background_color = self.background_color\n text_color = self.text_color\n icon = self.chat_icon\n\n message = await Message.create(\n text=self.input_value,\n sender=self.sender,\n sender_name=self.sender_name,\n session_id=self.session_id,\n files=self.files,\n properties={\n \"background_color\": background_color,\n \"text_color\": text_color,\n \"icon\": icon,\n },\n )\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "input_value": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Text", + "advanced": false, + 
"input_types": [], + "dynamic": false, + "info": "Message to be passed as input.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "sender": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Machine", + "User" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "sender", + "value": "User", + "display_name": "Sender Type", + "advanced": true, + "dynamic": false, + "info": "Type of sender.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "sender_name": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "sender_name", + "value": "User", + "display_name": "Sender Name", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Name of the sender.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "session_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "session_id", + "value": "", + "display_name": "Session ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The session ID of the chat. 
If empty, the current session ID parameter will be used.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "should_store_message": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "should_store_message", + "value": true, + "display_name": "Store Messages", + "advanced": true, + "dynamic": false, + "info": "Store the message in the history.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "text_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "text_color", + "value": "", + "display_name": "Text Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The text color of the name", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Get chat inputs from the Playground.", + "icon": "MessagesSquare", + "base_classes": [ + "Message" + ], + "display_name": "Chat Input", + "documentation": "", + "minimized": true, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "message", + "hidden": null, + "display_name": "Message", + "method": "message_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "files", + "background_color", + "chat_icon", + "text_color" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" + }, + "type": 
"ChatInput", + "id": "ChatInput-1OpYH" + }, + "selected": false, + "width": 320, + "height": 233, + "positionAbsolute": { + "x": -1213.7929007529683, + "y": 1374.354376106413 + }, + "dragging": false, + "measured": { + "width": 320, + "height": 233 + } + }, + { + "id": "ComposioAPI-bIRZI", + "type": "genericNode", + "position": { + "x": -1327.8299088045108, + "y": 467.27080748431626 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "auth_link": { + "required": false, + "placeholder": "Click to authenticate", + "show": false, + "name": "auth_link", + "value": "", + "display_name": "Authentication Link", + "advanced": true, + "dynamic": true, + "info": "Click to authenticate with OAuth2", + "title_case": false, + "type": "link", + "_input_type": "LinkInput", + "load_from_db": false + }, + "action_names": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "GMAIL_MODIFY_THREAD_LABELS", + "GMAIL_FETCH_MESSAGE_BY_THREAD_ID", + "GMAIL_SEARCH_PEOPLE", + "GMAIL_LIST_LABELS", + "GMAIL_GET_PEOPLE", + "GMAIL_GET_CONTACTS", + "GMAIL_ADD_LABEL_TO_EMAIL", + "GMAIL_SEND_EMAIL", + "GMAIL_GET_ATTACHMENT", + "GMAIL_GET_PROFILE", + "GMAIL_REPLY_TO_THREAD", + "GMAIL_CREATE_LABEL", + "GMAIL_CREATE_EMAIL_DRAFT", + "GMAIL_REMOVE_LABEL", + "GMAIL_FETCH_MESSAGE_BY_MESSAGE_ID", + "GMAIL_LIST_THREADS", + "GMAIL_FETCH_EMAILS" + ], + "combobox": false, + "list": true, + "list_add_label": "Add More", + "required": true, + "placeholder": "", + "show": true, + "name": "action_names", + "value": [ + "GMAIL_MODIFY_THREAD_LABELS", + "GMAIL_FETCH_EMAILS", + "GMAIL_SEARCH_PEOPLE" + ], + "display_name": "Actions to use", + "advanced": false, + "dynamic": true, + "info": "The actions to pass to agent to execute", + "title_case": false, + "type": "str", + "_input_type": "MultiselectInput" + }, + "api_key": { + "load_from_db": false, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "", + "display_name": "Composio API Key", + 
"advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Refer to https://docs.composio.dev/faq/api_key/api_key", + "real_time_refresh": true, + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "app_credentials": { + "load_from_db": false, + "required": false, + "placeholder": "", + "show": false, + "name": "app_credentials", + "value": "", + "display_name": "App Credentials", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": true, + "info": "Credentials for app authentication (API Key, Password, etc)", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "app_names": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "ACCELO", + "AIRTABLE", + "AMAZON", + "APALEO", + "ASANA", + "ATLASSIAN", + "ATTIO", + "AUTH0", + "BATTLENET", + "BITBUCKET", + "BLACKBAUD", + "BLACKBOARD", + "BOLDSIGN", + "BORNEO", + "BOX", + "BRAINTREE", + "BREX", + "BREX_STAGING", + "BRIGHTPEARL", + "CALENDLY", + "CANVA", + "CANVAS", + "CHATWORK", + "CLICKUP", + "CONTENTFUL", + "D2LBRIGHTSPACE", + "DEEL", + "DISCORD", + "DISCORDBOT", + "DOCUSIGN", + "DROPBOX", + "DROPBOX_SIGN", + "DYNAMICS365", + "EPIC_GAMES", + "EVENTBRITE", + "EXIST", + "FACEBOOK", + "FIGMA", + "FITBIT", + "FRESHBOOKS", + "FRONT", + "GITHUB", + "GMAIL", + "GMAIL_BETA", + "GO_TO_WEBINAR", + "GOOGLE_ANALYTICS", + "GOOGLE_DRIVE_BETA", + "GOOGLE_MAPS", + "GOOGLECALENDAR", + "GOOGLEDOCS", + "GOOGLEDRIVE", + "GOOGLEMEET", + "GOOGLEPHOTOS", + "GOOGLESHEETS", + "GOOGLETASKS", + "GORGIAS", + "GUMROAD", + "HARVEST", + "HIGHLEVEL", + "HUBSPOT", + "ICIMS_TALENT_CLOUD", + "INTERCOM", + "JIRA", + "KEAP", + "KLAVIYO", + "LASTPASS", + "LEVER", + "LEVER_SANDBOX", + "LINEAR", + "LINKEDIN", + "LINKHUT", + "MAILCHIMP", + "MICROSOFT_TEAMS", + "MICROSOFT_TENANT", + "MIRO", + "MONDAY", + "MURAL", + "NETSUITE", + "NOTION", + "ONE_DRIVE", + "OUTLOOK", + "PAGERDUTY", + "PIPEDRIVE", + 
"PRODUCTBOARD", + "REDDIT", + "RING_CENTRAL", + "RIPPLING", + "SAGE", + "SALESFORCE", + "SEISMIC", + "SERVICEM8", + "SHARE_POINT", + "SHOPIFY", + "SLACK", + "SLACKBOT", + "SMARTRECRUITERS", + "SPOTIFY", + "SQUARE", + "STACK_EXCHANGE", + "SURVEY_MONKEY", + "TIMELY", + "TODOIST", + "TONEDEN", + "TRELLO", + "TWITCH", + "TWITTER", + "TWITTER_MEDIA", + "WAKATIME", + "WAVE_ACCOUNTING", + "WEBEX", + "WIZ", + "WRIKE", + "XERO", + "YANDEX", + "YNAB", + "YOUTUBE", + "ZENDESK", + "ZOHO", + "ZOHO_BIGIN", + "ZOHO_BOOKS", + "ZOHO_DESK", + "ZOHO_INVENTORY", + "ZOHO_INVOICE", + "ZOHO_MAIL", + "ZOOM" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": true, + "placeholder": "", + "show": true, + "name": "app_names", + "value": "GMAIL", + "display_name": "App Name", + "advanced": false, + "dynamic": false, + "info": "The app name to use. Please refresh after selecting app name", + "refresh_button": true, + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "auth_status": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "auth_status", + "value": "✅", + "display_name": "Auth Status", + "advanced": false, + "dynamic": true, + "info": "Current authentication status", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "# Standard library imports\nfrom collections.abc import Sequence\nfrom typing import Any\n\nimport requests\n\n# Third-party imports\nfrom composio.client.collections import AppAuthScheme\nfrom composio.client.exceptions import NoItemsFound\nfrom composio_langchain import Action, ComposioToolSet\nfrom langchain_core.tools import Tool\nfrom loguru import logger\n\n# Local imports\nfrom 
langflow.base.langchain_utilities.model import LCToolComponent\nfrom langflow.inputs import DropdownInput, LinkInput, MessageTextInput, MultiselectInput, SecretStrInput, StrInput\nfrom langflow.io import Output\n\n\nclass ComposioAPIComponent(LCToolComponent):\n display_name: str = \"Composio Tools\"\n description: str = \"Use Composio toolset to run actions with your agent\"\n name = \"ComposioAPI\"\n icon = \"Composio\"\n documentation: str = \"https://docs.composio.dev\"\n\n inputs = [\n # Basic configuration inputs\n MessageTextInput(name=\"entity_id\", display_name=\"Entity ID\", value=\"default\", advanced=True),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"Composio API Key\",\n required=True,\n info=\"Refer to https://docs.composio.dev/faq/api_key/api_key\",\n real_time_refresh=True,\n ),\n DropdownInput(\n name=\"app_names\",\n display_name=\"App Name\",\n options=[],\n value=\"\",\n info=\"The app name to use. Please refresh after selecting app name\",\n refresh_button=True,\n required=True,\n ),\n # Authentication-related inputs (initially hidden)\n SecretStrInput(\n name=\"app_credentials\",\n display_name=\"App Credentials\",\n required=False,\n dynamic=True,\n show=False,\n info=\"Credentials for app authentication (API Key, Password, etc)\",\n load_from_db=False,\n ),\n MessageTextInput(\n name=\"username\",\n display_name=\"Username\",\n required=False,\n dynamic=True,\n show=False,\n info=\"Username for Basic authentication\",\n ),\n LinkInput(\n name=\"auth_link\",\n display_name=\"Authentication Link\",\n value=\"\",\n info=\"Click to authenticate with OAuth2\",\n dynamic=True,\n show=False,\n placeholder=\"Click to authenticate\",\n ),\n StrInput(\n name=\"auth_status\",\n display_name=\"Auth Status\",\n value=\"Not Connected\",\n info=\"Current authentication status\",\n dynamic=True,\n show=False,\n ),\n MultiselectInput(\n name=\"action_names\",\n display_name=\"Actions to use\",\n required=True,\n options=[],\n value=[],\n 
info=\"The actions to pass to agent to execute\",\n dynamic=True,\n show=False,\n ),\n ]\n\n outputs = [\n Output(name=\"tools\", display_name=\"Tools\", method=\"build_tool\"),\n ]\n\n def _check_for_authorization(self, app: str) -> str:\n \"\"\"Checks if the app is authorized.\n\n Args:\n app (str): The app name to check authorization for.\n\n Returns:\n str: The authorization status or URL.\n \"\"\"\n toolset = self._build_wrapper()\n entity = toolset.client.get_entity(id=self.entity_id)\n try:\n # Check if user is already connected\n entity.get_connection(app=app)\n except NoItemsFound:\n # Get auth scheme for the app\n auth_scheme = self._get_auth_scheme(app)\n return self._handle_auth_by_scheme(entity, app, auth_scheme)\n except Exception: # noqa: BLE001\n logger.exception(\"Authorization error\")\n return \"Error checking authorization\"\n else:\n return f\"{app} CONNECTED\"\n\n def _get_auth_scheme(self, app_name: str) -> AppAuthScheme:\n \"\"\"Get the primary auth scheme for an app.\n\n Args:\n app_name (str): The name of the app to get auth scheme for.\n\n Returns:\n AppAuthScheme: The auth scheme details.\n \"\"\"\n toolset = self._build_wrapper()\n try:\n return toolset.get_auth_scheme_for_app(app=app_name.lower())\n except Exception: # noqa: BLE001\n logger.exception(f\"Error getting auth scheme for {app_name}\")\n return None\n\n def _get_oauth_apps(self, api_key: str) -> list[str]:\n \"\"\"Fetch OAuth-enabled apps from Composio API.\n\n Args:\n api_key (str): The Composio API key.\n\n Returns:\n list[str]: A list containing OAuth-enabled app names.\n \"\"\"\n oauth_apps = []\n try:\n url = \"https://backend.composio.dev/api/v1/apps\"\n headers = {\"x-api-key\": api_key}\n params = {\n \"includeLocal\": \"true\",\n \"additionalFields\": \"auth_schemes\",\n \"sortBy\": \"alphabet\",\n }\n\n response = requests.get(url, headers=headers, params=params, timeout=20)\n data = response.json()\n\n for item in data.get(\"items\", []):\n for auth_scheme in 
item.get(\"auth_schemes\", []):\n if auth_scheme.get(\"mode\") in [\"OAUTH1\", \"OAUTH2\"]:\n oauth_apps.append(item[\"key\"].upper())\n break\n except requests.RequestException as e:\n logger.error(f\"Error fetching OAuth apps: {e}\")\n return []\n else:\n return oauth_apps\n\n def _handle_auth_by_scheme(self, entity: Any, app: str, auth_scheme: AppAuthScheme) -> str:\n \"\"\"Handle authentication based on the auth scheme.\n\n Args:\n entity (Any): The entity instance.\n app (str): The app name.\n auth_scheme (AppAuthScheme): The auth scheme details.\n\n Returns:\n str: The authentication status or URL.\n \"\"\"\n auth_mode = auth_scheme.auth_mode\n\n try:\n # First check if already connected\n entity.get_connection(app=app)\n except NoItemsFound:\n # If not connected, handle new connection based on auth mode\n if auth_mode == \"API_KEY\":\n if hasattr(self, \"app_credentials\") and self.app_credentials:\n try:\n entity.initiate_connection(\n app_name=app,\n auth_mode=\"API_KEY\",\n auth_config={\"api_key\": self.app_credentials},\n use_composio_auth=False,\n force_new_integration=True,\n )\n except Exception as e: # noqa: BLE001\n logger.error(f\"Error connecting with API Key: {e}\")\n return \"Invalid API Key\"\n else:\n return f\"{app} CONNECTED\"\n return \"Enter API Key\"\n\n if (\n auth_mode == \"BASIC\"\n and hasattr(self, \"username\")\n and hasattr(self, \"app_credentials\")\n and self.username\n and self.app_credentials\n ):\n try:\n entity.initiate_connection(\n app_name=app,\n auth_mode=\"BASIC\",\n auth_config={\"username\": self.username, \"password\": self.app_credentials},\n use_composio_auth=False,\n force_new_integration=True,\n )\n except Exception as e: # noqa: BLE001\n logger.error(f\"Error connecting with Basic Auth: {e}\")\n return \"Invalid credentials\"\n else:\n return f\"{app} CONNECTED\"\n elif auth_mode == \"BASIC\":\n return \"Enter Username and Password\"\n\n if auth_mode == \"OAUTH2\":\n try:\n return 
self._initiate_default_connection(entity, app)\n except Exception as e: # noqa: BLE001\n logger.error(f\"Error initiating OAuth2: {e}\")\n return \"OAuth2 initialization failed\"\n\n return \"Unsupported auth mode\"\n except Exception as e: # noqa: BLE001\n logger.error(f\"Error checking connection status: {e}\")\n return f\"Error: {e!s}\"\n else:\n return f\"{app} CONNECTED\"\n\n def _initiate_default_connection(self, entity: Any, app: str) -> str:\n connection = entity.initiate_connection(app_name=app, use_composio_auth=True, force_new_integration=True)\n return connection.redirectUrl\n\n def _get_connected_app_names_for_entity(self) -> list[str]:\n toolset = self._build_wrapper()\n connections = toolset.client.get_entity(id=self.entity_id).get_connections()\n return list({connection.appUniqueId for connection in connections})\n\n def _get_normalized_app_name(self) -> str:\n \"\"\"Get app name without connection status suffix.\n\n Returns:\n str: Normalized app name.\n \"\"\"\n return self.app_names.replace(\" ✅\", \"\").replace(\"_connected\", \"\")\n\n def update_build_config(self, build_config: dict, field_value: Any, field_name: str | None = None) -> dict: # noqa: ARG002\n # Update the available apps options from the API\n if hasattr(self, \"api_key\") and self.api_key != \"\":\n toolset = self._build_wrapper()\n build_config[\"app_names\"][\"options\"] = self._get_oauth_apps(api_key=self.api_key)\n\n # First, ensure all dynamic fields are hidden by default\n dynamic_fields = [\"app_credentials\", \"username\", \"auth_link\", \"auth_status\", \"action_names\"]\n for field in dynamic_fields:\n if field in build_config:\n if build_config[field][\"value\"] is None or build_config[field][\"value\"] == \"\":\n build_config[field][\"show\"] = False\n build_config[field][\"advanced\"] = True\n build_config[field][\"load_from_db\"] = False\n else:\n build_config[field][\"show\"] = True\n build_config[field][\"advanced\"] = False\n\n if field_name == \"app_names\" and 
(not hasattr(self, \"app_names\") or not self.app_names):\n build_config[\"auth_status\"][\"show\"] = True\n build_config[\"auth_status\"][\"value\"] = \"Please select an app first\"\n return build_config\n\n if field_name == \"app_names\" and hasattr(self, \"api_key\") and self.api_key != \"\":\n # app_name = self._get_normalized_app_name()\n app_name = self.app_names\n try:\n toolset = self._build_wrapper()\n entity = toolset.client.get_entity(id=self.entity_id)\n\n # Always show auth_status when app is selected\n build_config[\"auth_status\"][\"show\"] = True\n build_config[\"auth_status\"][\"advanced\"] = False\n\n try:\n # Check if already connected\n entity.get_connection(app=app_name)\n build_config[\"auth_status\"][\"value\"] = \"✅\"\n build_config[\"auth_link\"][\"show\"] = False\n # Show action selection for connected apps\n build_config[\"action_names\"][\"show\"] = True\n build_config[\"action_names\"][\"advanced\"] = False\n\n except NoItemsFound:\n # Get auth scheme and show relevant fields\n auth_scheme = self._get_auth_scheme(app_name)\n auth_mode = auth_scheme.auth_mode\n logger.info(f\"Auth mode for {app_name}: {auth_mode}\")\n\n if auth_mode == \"API_KEY\":\n build_config[\"app_credentials\"][\"show\"] = True\n build_config[\"app_credentials\"][\"advanced\"] = False\n build_config[\"app_credentials\"][\"display_name\"] = \"API Key\"\n build_config[\"auth_status\"][\"value\"] = \"Enter API Key\"\n\n elif auth_mode == \"BASIC\":\n build_config[\"username\"][\"show\"] = True\n build_config[\"username\"][\"advanced\"] = False\n build_config[\"app_credentials\"][\"show\"] = True\n build_config[\"app_credentials\"][\"advanced\"] = False\n build_config[\"app_credentials\"][\"display_name\"] = \"Password\"\n build_config[\"auth_status\"][\"value\"] = \"Enter Username and Password\"\n\n elif auth_mode == \"OAUTH2\":\n build_config[\"auth_link\"][\"show\"] = True\n build_config[\"auth_link\"][\"advanced\"] = False\n auth_url = 
self._initiate_default_connection(entity, app_name)\n build_config[\"auth_link\"][\"value\"] = auth_url\n build_config[\"auth_status\"][\"value\"] = \"Click link to authenticate\"\n\n else:\n build_config[\"auth_status\"][\"value\"] = \"Unsupported auth mode\"\n\n # Update action names if connected\n if build_config[\"auth_status\"][\"value\"] == \"✅\":\n all_action_names = [str(action).replace(\"Action.\", \"\") for action in Action.all()]\n app_action_names = [\n action_name\n for action_name in all_action_names\n if action_name.lower().startswith(app_name.lower() + \"_\")\n ]\n if build_config[\"action_names\"][\"options\"] != app_action_names:\n build_config[\"action_names\"][\"options\"] = app_action_names\n build_config[\"action_names\"][\"value\"] = [app_action_names[0]] if app_action_names else [\"\"]\n\n except Exception as e: # noqa: BLE001\n logger.error(f\"Error checking auth status: {e}, app: {app_name}\")\n build_config[\"auth_status\"][\"value\"] = f\"Error: {e!s}\"\n\n return build_config\n\n def build_tool(self) -> Sequence[Tool]:\n \"\"\"Build Composio tools based on selected actions.\n\n Returns:\n Sequence[Tool]: List of configured Composio tools.\n \"\"\"\n composio_toolset = self._build_wrapper()\n return composio_toolset.get_tools(actions=self.action_names)\n\n def _build_wrapper(self) -> ComposioToolSet:\n \"\"\"Build the Composio toolset wrapper.\n\n Returns:\n ComposioToolSet: The initialized toolset.\n\n Raises:\n ValueError: If the API key is not found or invalid.\n \"\"\"\n try:\n if not self.api_key:\n msg = \"Composio API Key is required\"\n raise ValueError(msg)\n return ComposioToolSet(api_key=self.api_key)\n except ValueError as e:\n logger.error(f\"Error building Composio wrapper: {e}\")\n msg = \"Please provide a valid Composio API Key in the component settings\"\n raise ValueError(msg) from e\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + 
"load_from_db": false, + "title_case": false + }, + "entity_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "entity_id", + "value": "default", + "display_name": "Entity ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "username": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": false, + "name": "username", + "value": "", + "display_name": "Username", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": true, + "info": "Username for Basic authentication", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Use Composio toolset to run actions with your agent", + "icon": "Composio", + "base_classes": [ + "Tool" + ], + "display_name": "Composio Tools", + "documentation": "https://docs.composio.dev", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Tool" + ], + "selected": "Tool", + "name": "tools", + "hidden": null, + "display_name": "Tools", + "method": "build_tool", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "entity_id", + "api_key", + "app_names", + "app_credentials", + "username", + "auth_link", + "auth_status", + "action_names" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "category": "composio", + "key": "ComposioAPI", + "score": 0.0000025911107559075892, + "lf_version": 
"1.1.3.dev5" + }, + "showNode": true, + "type": "ComposioAPI", + "id": "ComposioAPI-bIRZI" + }, + "selected": false, + "measured": { + "width": 320, + "height": 495 + }, + "dragging": false + }, + { + "id": "OpenAIModel-WX4ZH", + "type": "genericNode", + "position": { + "x": -261.15668613846026, + "y": 470.4905565668116 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "api_key": { + "load_from_db": false, + "required": true, + "placeholder": "", + "show": true, + "name": "api_key", + "value": "", + "display_name": "OpenAI API Key", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The OpenAI API Key to use for the OpenAI model.", + "title_case": false, + "password": true, + "type": "str", + "_input_type": "SecretStrInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langchain_openai import ChatOpenAI\nfrom pydantic.v1 import SecretStr\n\nfrom langflow.base.models.model import LCModelComponent\nfrom langflow.base.models.openai_constants import OPENAI_MODEL_NAMES\nfrom langflow.field_typing import LanguageModel\nfrom langflow.field_typing.range_spec import RangeSpec\nfrom langflow.inputs import BoolInput, DictInput, DropdownInput, IntInput, SecretStrInput, SliderInput, StrInput\n\n\nclass OpenAIModelComponent(LCModelComponent):\n display_name = \"OpenAI\"\n description = \"Generates text using OpenAI LLMs.\"\n icon = \"OpenAI\"\n name = \"OpenAIModel\"\n\n inputs = [\n *LCModelComponent._base_inputs,\n IntInput(\n name=\"max_tokens\",\n display_name=\"Max Tokens\",\n advanced=True,\n info=\"The maximum number of tokens to generate. 
Set to 0 for unlimited tokens.\",\n range_spec=RangeSpec(min=0, max=128000),\n ),\n DictInput(\n name=\"model_kwargs\",\n display_name=\"Model Kwargs\",\n advanced=True,\n info=\"Additional keyword arguments to pass to the model.\",\n ),\n BoolInput(\n name=\"json_mode\",\n display_name=\"JSON Mode\",\n advanced=True,\n info=\"If True, it will output JSON regardless of passing a schema.\",\n ),\n DropdownInput(\n name=\"model_name\",\n display_name=\"Model Name\",\n advanced=False,\n options=OPENAI_MODEL_NAMES,\n value=OPENAI_MODEL_NAMES[0],\n ),\n StrInput(\n name=\"openai_api_base\",\n display_name=\"OpenAI API Base\",\n advanced=True,\n info=\"The base URL of the OpenAI API. \"\n \"Defaults to https://api.openai.com/v1. \"\n \"You can change this to use other APIs like JinaChat, LocalAI and Prem.\",\n ),\n SecretStrInput(\n name=\"api_key\",\n display_name=\"OpenAI API Key\",\n info=\"The OpenAI API Key to use for the OpenAI model.\",\n advanced=False,\n value=\"OPENAI_API_KEY\",\n required=True,\n ),\n SliderInput(\n name=\"temperature\", display_name=\"Temperature\", value=0.1, range_spec=RangeSpec(min=0, max=2, step=0.01)\n ),\n IntInput(\n name=\"seed\",\n display_name=\"Seed\",\n info=\"The seed controls the reproducibility of the job.\",\n advanced=True,\n value=1,\n ),\n ]\n\n def build_model(self) -> LanguageModel: # type: ignore[type-var]\n openai_api_key = self.api_key\n temperature = self.temperature\n model_name: str = self.model_name\n max_tokens = self.max_tokens\n model_kwargs = self.model_kwargs or {}\n openai_api_base = self.openai_api_base or \"https://api.openai.com/v1\"\n json_mode = self.json_mode\n seed = self.seed\n\n api_key = SecretStr(openai_api_key).get_secret_value() if openai_api_key else None\n output = ChatOpenAI(\n max_tokens=max_tokens or None,\n model_kwargs=model_kwargs,\n model=model_name,\n base_url=openai_api_base,\n api_key=api_key,\n temperature=temperature if temperature is not None else 0.1,\n seed=seed,\n )\n if 
json_mode:\n output = output.bind(response_format={\"type\": \"json_object\"})\n\n return output\n\n def _get_exception_message(self, e: Exception):\n \"\"\"Get a message from an OpenAI exception.\n\n Args:\n e (Exception): The exception to get the message from.\n\n Returns:\n str: The message from the exception.\n \"\"\"\n try:\n from openai import BadRequestError\n except ImportError:\n return None\n if isinstance(e, BadRequestError):\n message = e.body.get(\"message\")\n if message:\n return message\n return None\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Input", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + }, + "json_mode": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "json_mode", + "value": false, + "display_name": "JSON Mode", + "advanced": true, + "dynamic": false, + "info": "If True, it will output JSON regardless of passing a schema.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "max_tokens": { + "tool_mode": false, + "trace_as_metadata": true, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 128000, + "step": 0.1 + }, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "max_tokens", + "value": "", + "display_name": "Max Tokens", + "advanced": true, + "dynamic": false, + "info": "The 
maximum number of tokens to generate. Set to 0 for unlimited tokens.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "model_kwargs": { + "tool_mode": false, + "trace_as_input": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "model_kwargs", + "value": {}, + "display_name": "Model Kwargs", + "advanced": true, + "dynamic": false, + "info": "Additional keyword arguments to pass to the model.", + "title_case": false, + "type": "dict", + "_input_type": "DictInput" + }, + "model_name": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "gpt-4o-mini", + "gpt-4o", + "gpt-4-turbo", + "gpt-4-turbo-preview", + "gpt-4", + "gpt-3.5-turbo", + "gpt-3.5-turbo-0125" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "model_name", + "value": "gpt-4o-mini", + "display_name": "Model Name", + "advanced": false, + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "openai_api_base": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "openai_api_base", + "value": "", + "display_name": "OpenAI API Base", + "advanced": true, + "dynamic": false, + "info": "The base URL of the OpenAI API. Defaults to https://api.openai.com/v1. 
You can change this to use other APIs like JinaChat, LocalAI and Prem.", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "seed": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "seed", + "value": 1, + "display_name": "Seed", + "advanced": true, + "dynamic": false, + "info": "The seed controls the reproducibility of the job.", + "title_case": false, + "type": "int", + "_input_type": "IntInput" + }, + "stream": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "stream", + "value": false, + "display_name": "Stream", + "advanced": false, + "dynamic": false, + "info": "Stream the response from the model. Streaming works only in Chat.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "system_message": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "system_message", + "value": "Provide me only the URL only", + "display_name": "System Message", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "System message to pass to the model.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + }, + "temperature": { + "tool_mode": false, + "min_label": "", + "max_label": "", + "min_label_icon": "", + "max_label_icon": "", + "slider_buttons": false, + "slider_buttons_options": [], + "slider_input": false, + "range_spec": { + "step_type": "float", + "min": 0, + "max": 2, + "step": 0.01 + }, + "required": false, + "placeholder": "", + "show": true, + "name": "temperature", + "value": 0.1, + "display_name": "Temperature", + "advanced": false, + 
"dynamic": false, + "info": "", + "title_case": false, + "type": "slider", + "_input_type": "SliderInput" + } + }, + "description": "Generates text using OpenAI LLMs.", + "icon": "OpenAI", + "base_classes": [ + "LanguageModel", + "Message" + ], + "display_name": "OpenAI", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text_output", + "display_name": "Message", + "method": "text_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": [], + "allows_loop": false, + "tool_mode": true + }, + { + "types": [ + "LanguageModel" + ], + "selected": "LanguageModel", + "name": "model_output", + "display_name": "Language Model", + "method": "build_model", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": [ + "api_key" + ], + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "system_message", + "stream", + "max_tokens", + "model_kwargs", + "json_mode", + "model_name", + "openai_api_base", + "api_key", + "temperature", + "seed" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "category": "models", + "key": "OpenAIModel", + "score": 0.001, + "lf_version": "1.1.3.dev5" + }, + "showNode": true, + "type": "OpenAIModel", + "id": "OpenAIModel-WX4ZH" + }, + "selected": false, + "measured": { + "width": 320, + "height": 653 + }, + "dragging": false + }, + { + "id": "ParseData-qQwA0", + "type": "genericNode", + "position": { + "x": 675.2876939620461, + "y": 1117.6057052071612 + }, + "data": { + "node": { + "template": { + "_type": "Component", + "data": { + "tool_mode": false, + "trace_as_metadata": true, + "list": true, + "list_add_label": "Add More", + "trace_as_input": true, + "required": true, + "placeholder": "", + "show": true, + "name": "data", + "value": "", + 
"display_name": "Data", + "advanced": false, + "input_types": [ + "Data" + ], + "dynamic": false, + "info": "The data to convert to text.", + "title_case": false, + "type": "other", + "_input_type": "DataInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.custom import Component\nfrom langflow.helpers.data import data_to_text, data_to_text_list\nfrom langflow.io import DataInput, MultilineInput, Output, StrInput\nfrom langflow.schema import Data\nfrom langflow.schema.message import Message\n\n\nclass ParseDataComponent(Component):\n display_name = \"Data to Message\"\n description = \"Convert Data objects into Messages using any {field_name} from input data.\"\n icon = \"message-square\"\n name = \"ParseData\"\n\n inputs = [\n DataInput(name=\"data\", display_name=\"Data\", info=\"The data to convert to text.\", is_list=True, required=True),\n MultilineInput(\n name=\"template\",\n display_name=\"Template\",\n info=\"The template to use for formatting the data. 
\"\n \"It can contain the keys {text}, {data} or any other key in the Data.\",\n value=\"{text}\",\n required=True,\n ),\n StrInput(name=\"sep\", display_name=\"Separator\", advanced=True, value=\"\\n\"),\n ]\n\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"text\",\n info=\"Data as a single Message, with each input Data separated by Separator\",\n method=\"parse_data\",\n ),\n Output(\n display_name=\"Data List\",\n name=\"data_list\",\n info=\"Data as a list of new Data, each having `text` formatted by Template\",\n method=\"parse_data_as_list\",\n ),\n ]\n\n def _clean_args(self) -> tuple[list[Data], str, str]:\n data = self.data if isinstance(self.data, list) else [self.data]\n template = self.template\n sep = self.sep\n return data, template, sep\n\n def parse_data(self) -> Message:\n data, template, sep = self._clean_args()\n result_string = data_to_text(template, data, sep)\n self.status = result_string\n return Message(text=result_string)\n\n def parse_data_as_list(self) -> list[Data]:\n data, template, _ = self._clean_args()\n text_list, data_list = data_to_text_list(template, data)\n for item, text in zip(data_list, text_list, strict=True):\n item.set_text(text)\n self.status = data_list\n return data_list\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "sep": { + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "sep", + "value": "\n", + "display_name": "Separator", + "advanced": true, + "dynamic": false, + "info": "", + "title_case": false, + "type": "str", + "_input_type": "StrInput" + }, + "template": { + "tool_mode": false, + "trace_as_input": true, + "multiline": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": 
"Add More", + "required": true, + "placeholder": "", + "show": true, + "name": "template", + "value": "{text}", + "display_name": "Template", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The template to use for formatting the data. It can contain the keys {text}, {data} or any other key in the Data.", + "title_case": false, + "type": "str", + "_input_type": "MultilineInput" + } + }, + "description": "Convert Data objects into Messages using any {field_name} from input data.", + "icon": "message-square", + "base_classes": [ + "Data", + "Message" + ], + "display_name": "Data to Message", + "documentation": "", + "minimized": false, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "text", + "display_name": "Message", + "method": "parse_data", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "tool_mode": true + }, + { + "types": [ + "Data" + ], + "selected": "Data", + "name": "data_list", + "display_name": "Data List", + "method": "parse_data_as_list", + "value": "__UNDEFINED__", + "cache": true, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "data", + "template", + "sep" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "category": "processing", + "key": "ParseData", + "score": 0.01857804455091699, + "lf_version": "1.1.3.dev5" + }, + "showNode": true, + "type": "ParseData", + "id": "ParseData-qQwA0" + }, + "selected": false, + "measured": { + "width": 320, + "height": 341 + }, + "dragging": false + }, + { + "id": "URL-ohTze", + "type": "genericNode", + "position": { + "x": 224.84521858186594, + "y": 611.4438869428261 + }, + "data": { + "id": "URL-ohTze", + "node": { + "base_classes": [ + "Data", + "DataFrame", + "Message" + ], + "beta": false, + "category": "data", + 
"conditional_paths": [], + "custom_fields": {}, + "description": "Load and retrive data from specified URLs.", + "display_name": "URL", + "documentation": "", + "edited": false, + "field_order": [ + "urls", + "format" + ], + "frozen": false, + "icon": "layout-template", + "key": "URL", + "legacy": false, + "metadata": {}, + "minimized": false, + "output_types": [], + "outputs": [ + { + "allows_loop": false, + "cache": true, + "display_name": "Data", + "method": "fetch_content", + "name": "data", + "selected": "Data", + "tool_mode": true, + "types": [ + "Data" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "Message", + "method": "fetch_content_text", + "name": "text", + "selected": "Message", + "tool_mode": true, + "types": [ + "Message" + ], + "value": "__UNDEFINED__" + }, + { + "allows_loop": false, + "cache": true, + "display_name": "DataFrame", + "method": "as_dataframe", + "name": "dataframe", + "selected": "DataFrame", + "tool_mode": true, + "types": [ + "DataFrame" + ], + "value": "__UNDEFINED__" + } + ], + "pinned": false, + "score": 2.220446049250313e-16, + "template": { + "_type": "Component", + "code": { + "advanced": true, + "dynamic": true, + "fileTypes": [], + "file_path": "", + "info": "", + "list": false, + "load_from_db": false, + "multiline": true, + "name": "code", + "password": false, + "placeholder": "", + "required": true, + "show": true, + "title_case": false, + "type": "code", + "value": "import re\n\nfrom langchain_community.document_loaders import AsyncHtmlLoader, WebBaseLoader\n\nfrom langflow.custom import Component\nfrom langflow.helpers.data import data_to_text\nfrom langflow.io import DropdownInput, MessageTextInput, Output\nfrom langflow.schema import Data\nfrom langflow.schema.dataframe import DataFrame\nfrom langflow.schema.message import Message\n\n\nclass URLComponent(Component):\n display_name = \"URL\"\n description = \"Load and retrive data from specified URLs.\"\n icon = 
\"layout-template\"\n name = \"URL\"\n\n inputs = [\n MessageTextInput(\n name=\"urls\",\n display_name=\"URLs\",\n is_list=True,\n tool_mode=True,\n placeholder=\"Enter a URL...\",\n list_add_label=\"Add URL\",\n ),\n DropdownInput(\n name=\"format\",\n display_name=\"Output Format\",\n info=\"Output Format. Use 'Text' to extract the text from the HTML or 'Raw HTML' for the raw HTML content.\",\n options=[\"Text\", \"Raw HTML\"],\n value=\"Text\",\n ),\n ]\n\n outputs = [\n Output(display_name=\"Data\", name=\"data\", method=\"fetch_content\"),\n Output(display_name=\"Message\", name=\"text\", method=\"fetch_content_text\"),\n Output(display_name=\"DataFrame\", name=\"dataframe\", method=\"as_dataframe\"),\n ]\n\n def ensure_url(self, string: str) -> str:\n \"\"\"Ensures the given string is a URL by adding 'http://' if it doesn't start with 'http://' or 'https://'.\n\n Raises an error if the string is not a valid URL.\n\n Parameters:\n string (str): The string to be checked and possibly modified.\n\n Returns:\n str: The modified string that is ensured to be a URL.\n\n Raises:\n ValueError: If the string is not a valid URL.\n \"\"\"\n if not string.startswith((\"http://\", \"https://\")):\n string = \"http://\" + string\n\n # Basic URL validation regex\n url_regex = re.compile(\n r\"^(https?:\\/\\/)?\" # optional protocol\n r\"(www\\.)?\" # optional www\n r\"([a-zA-Z0-9.-]+)\" # domain\n r\"(\\.[a-zA-Z]{2,})?\" # top-level domain\n r\"(:\\d+)?\" # optional port\n r\"(\\/[^\\s]*)?$\", # optional path\n re.IGNORECASE,\n )\n\n if not url_regex.match(string):\n msg = f\"Invalid URL: {string}\"\n raise ValueError(msg)\n\n return string\n\n def fetch_content(self) -> list[Data]:\n urls = [self.ensure_url(url.strip()) for url in self.urls if url.strip()]\n if self.format == \"Raw HTML\":\n loader = AsyncHtmlLoader(web_path=urls, encoding=\"utf-8\")\n else:\n loader = WebBaseLoader(web_paths=urls, encoding=\"utf-8\")\n docs = loader.load()\n data = 
[Data(text=doc.page_content, **doc.metadata) for doc in docs]\n self.status = data\n return data\n\n def fetch_content_text(self) -> Message:\n data = self.fetch_content()\n\n result_string = data_to_text(\"{text}\", data)\n self.status = result_string\n return Message(text=result_string)\n\n def as_dataframe(self) -> DataFrame:\n return DataFrame(self.fetch_content())\n" + }, + "format": { + "_input_type": "DropdownInput", + "advanced": false, + "combobox": false, + "dialog_inputs": {}, + "display_name": "Output Format", + "dynamic": false, + "info": "Output Format. Use 'Text' to extract the text from the HTML or 'Raw HTML' for the raw HTML content.", + "name": "format", + "options": [ + "Text", + "Raw HTML" + ], + "options_metadata": [], + "placeholder": "", + "required": false, + "show": true, + "title_case": false, + "tool_mode": false, + "trace_as_metadata": true, + "type": "str", + "value": "Text" + }, + "urls": { + "_input_type": "MessageTextInput", + "advanced": false, + "display_name": "URLs", + "dynamic": false, + "info": "", + "input_types": [ + "Message" + ], + "list": true, + "list_add_label": "Add URL", + "load_from_db": false, + "name": "urls", + "placeholder": "Enter a URL...", + "required": false, + "show": true, + "title_case": false, + "tool_mode": true, + "trace_as_input": true, + "trace_as_metadata": true, + "type": "str", + "value": [ + "" + ] + } + }, + "tool_mode": false, + "lf_version": "1.1.3.dev5" + }, + "showNode": true, + "type": "URL" + }, + "selected": false, + "measured": { + "width": 320, + "height": 415 + }, + "dragging": false + }, + { + "id": "ChatOutput-ngjfd", + "type": "genericNode", + "position": { + "x": -359.9074091892667, + "y": 1288.8167922508599 + }, + "data": { + "description": "Display a chat message in the Playground.", + "display_name": "Chat Output", + "id": "ChatOutput-ngjfd", + "node": { + "template": { + "_type": "Component", + "background_color": { + "tool_mode": false, + "trace_as_input": true, + 
"trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "background_color", + "value": "", + "display_name": "Background Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The background color of the icon.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "chat_icon": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "chat_icon", + "value": "", + "display_name": "Icon", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The icon of the message.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "code": { + "type": "code", + "required": true, + "placeholder": "", + "list": false, + "show": true, + "multiline": true, + "value": "from langflow.base.io.chat import ChatComponent\nfrom langflow.inputs import BoolInput\nfrom langflow.io import DropdownInput, MessageInput, MessageTextInput, Output\nfrom langflow.schema.message import Message\nfrom langflow.schema.properties import Source\nfrom langflow.utils.constants import (\n MESSAGE_SENDER_AI,\n MESSAGE_SENDER_NAME_AI,\n MESSAGE_SENDER_USER,\n)\n\n\nclass ChatOutput(ChatComponent):\n display_name = \"Chat Output\"\n description = \"Display a chat message in the Playground.\"\n icon = \"MessagesSquare\"\n name = \"ChatOutput\"\n minimized = True\n\n inputs = [\n MessageInput(\n name=\"input_value\",\n display_name=\"Text\",\n info=\"Message to be passed as output.\",\n ),\n BoolInput(\n name=\"should_store_message\",\n display_name=\"Store Messages\",\n info=\"Store the message in the history.\",\n value=True,\n advanced=True,\n ),\n DropdownInput(\n name=\"sender\",\n display_name=\"Sender 
Type\",\n options=[MESSAGE_SENDER_AI, MESSAGE_SENDER_USER],\n value=MESSAGE_SENDER_AI,\n advanced=True,\n info=\"Type of sender.\",\n ),\n MessageTextInput(\n name=\"sender_name\",\n display_name=\"Sender Name\",\n info=\"Name of the sender.\",\n value=MESSAGE_SENDER_NAME_AI,\n advanced=True,\n ),\n MessageTextInput(\n name=\"session_id\",\n display_name=\"Session ID\",\n info=\"The session ID of the chat. If empty, the current session ID parameter will be used.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"data_template\",\n display_name=\"Data Template\",\n value=\"{text}\",\n advanced=True,\n info=\"Template to convert Data to Text. If left empty, it will be dynamically set to the Data's text key.\",\n ),\n MessageTextInput(\n name=\"background_color\",\n display_name=\"Background Color\",\n info=\"The background color of the icon.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"chat_icon\",\n display_name=\"Icon\",\n info=\"The icon of the message.\",\n advanced=True,\n ),\n MessageTextInput(\n name=\"text_color\",\n display_name=\"Text Color\",\n info=\"The text color of the name\",\n advanced=True,\n ),\n ]\n outputs = [\n Output(\n display_name=\"Message\",\n name=\"message\",\n method=\"message_response\",\n ),\n ]\n\n def _build_source(self, id_: str | None, display_name: str | None, source: str | None) -> Source:\n source_dict = {}\n if id_:\n source_dict[\"id\"] = id_\n if display_name:\n source_dict[\"display_name\"] = display_name\n if source:\n source_dict[\"source\"] = source\n return Source(**source_dict)\n\n async def message_response(self) -> Message:\n source, icon, display_name, source_id = self.get_properties_from_source_component()\n background_color = self.background_color\n text_color = self.text_color\n if self.chat_icon:\n icon = self.chat_icon\n message = self.input_value if isinstance(self.input_value, Message) else Message(text=self.input_value)\n message.sender = self.sender\n message.sender_name = self.sender_name\n 
message.session_id = self.session_id\n message.flow_id = self.graph.flow_id if hasattr(self, \"graph\") else None\n message.properties.source = self._build_source(source_id, display_name, source)\n message.properties.icon = icon\n message.properties.background_color = background_color\n message.properties.text_color = text_color\n if self.session_id and isinstance(message, Message) and self.should_store_message:\n stored_message = await self.send_message(\n message,\n )\n self.message.value = stored_message\n message = stored_message\n\n self.status = message\n return message\n", + "fileTypes": [], + "file_path": "", + "password": false, + "name": "code", + "advanced": true, + "dynamic": true, + "info": "", + "load_from_db": false, + "title_case": false + }, + "data_template": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "data_template", + "value": "{text}", + "display_name": "Data Template", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Template to convert Data to Text. 
If left empty, it will be dynamically set to the Data's text key.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "input_value": { + "trace_as_input": true, + "tool_mode": false, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "input_value", + "value": "", + "display_name": "Text", + "advanced": false, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Message to be passed as output.", + "title_case": false, + "type": "str", + "_input_type": "MessageInput" + }, + "sender": { + "tool_mode": false, + "trace_as_metadata": true, + "options": [ + "Machine", + "User" + ], + "options_metadata": [], + "combobox": false, + "dialog_inputs": {}, + "required": false, + "placeholder": "", + "show": true, + "name": "sender", + "value": "Machine", + "display_name": "Sender Type", + "advanced": true, + "dynamic": false, + "info": "Type of sender.", + "title_case": false, + "type": "str", + "_input_type": "DropdownInput" + }, + "sender_name": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "sender_name", + "value": "AI", + "display_name": "Sender Name", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "Name of the sender.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "session_id": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "session_id", + "value": "", + "display_name": "Session ID", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The session ID of the 
chat. If empty, the current session ID parameter will be used.", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + }, + "should_store_message": { + "tool_mode": false, + "trace_as_metadata": true, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "should_store_message", + "value": true, + "display_name": "Store Messages", + "advanced": true, + "dynamic": false, + "info": "Store the message in the history.", + "title_case": false, + "type": "bool", + "_input_type": "BoolInput" + }, + "text_color": { + "tool_mode": false, + "trace_as_input": true, + "trace_as_metadata": true, + "load_from_db": false, + "list": false, + "list_add_label": "Add More", + "required": false, + "placeholder": "", + "show": true, + "name": "text_color", + "value": "", + "display_name": "Text Color", + "advanced": true, + "input_types": [ + "Message" + ], + "dynamic": false, + "info": "The text color of the name", + "title_case": false, + "type": "str", + "_input_type": "MessageTextInput" + } + }, + "description": "Display a chat message in the Playground.", + "icon": "MessagesSquare", + "base_classes": [ + "Message" + ], + "display_name": "Chat Output", + "documentation": "", + "minimized": true, + "custom_fields": {}, + "output_types": [], + "pinned": false, + "conditional_paths": [], + "frozen": false, + "outputs": [ + { + "types": [ + "Message" + ], + "selected": "Message", + "name": "message", + "hidden": null, + "display_name": "Message", + "method": "message_response", + "value": "__UNDEFINED__", + "cache": true, + "required_inputs": null, + "allows_loop": false, + "tool_mode": true + } + ], + "field_order": [ + "input_value", + "should_store_message", + "sender", + "sender_name", + "session_id", + "data_template", + "background_color", + "chat_icon", + "text_color" + ], + "beta": false, + "legacy": false, + "edited": false, + "metadata": {}, + "tool_mode": false, + "lf_version": "1.1.3.dev5" 
+ }, + "type": "ChatOutput" + }, + "selected": false, + "measured": { + "width": 320, + "height": 229 + }, + "dragging": false + } + ], + "edges": [ + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "Prompt", + "id": "Prompt-GSe05", + "name": "prompt", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-NT3Gk", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "reactflow__edge-Prompt-GSe05{œdataTypeœ:œPromptœ,œidœ:œPrompt-GSe05œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-NT3Gk{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-NT3Gkœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "Prompt-GSe05", + "sourceHandle": "{œdataTypeœ:œPromptœ,œidœ:œPrompt-GSe05œ,œnameœ:œpromptœ,œoutput_typesœ:[œMessageœ]}", + "target": "OpenAIModel-NT3Gk", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-NT3Gkœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "animated": false, + "className": "", + "data": { + "sourceHandle": { + "dataType": "OpenAIModel", + "id": "OpenAIModel-NT3Gk", + "name": "text_output", + "output_types": [ + "Message" + ] + }, + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-kTP2v", + "inputTypes": [ + "Message" + ], + "type": "str" + } + }, + "id": "reactflow__edge-OpenAIModel-NT3Gk{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-NT3Gkœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-kTP2v{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-kTP2vœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "source": "OpenAIModel-NT3Gk", + "sourceHandle": "{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-NT3Gkœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}", + "target": "ChatOutput-kTP2v", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-kTP2vœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}" + }, + { + "source": "ChatInput-1OpYH", + "sourceHandle": 
"{œdataTypeœ:œChatInputœ,œidœ:œChatInput-1OpYHœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}", + "target": "Agent-VX80X", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œAgent-VX80Xœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "input_value", + "id": "Agent-VX80X", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "ChatInput", + "id": "ChatInput-1OpYH", + "name": "message", + "output_types": [ + "Message" + ] + } + }, + "id": "reactflow__edge-ChatInput-1OpYH{œdataTypeœ:œChatInputœ,œidœ:œChatInput-1OpYHœ,œnameœ:œmessageœ,œoutput_typesœ:[œMessageœ]}-Agent-VX80X{œfieldNameœ:œinput_valueœ,œidœ:œAgent-VX80Xœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + }, + { + "source": "ComposioAPI-bIRZI", + "sourceHandle": "{œdataTypeœ:œComposioAPIœ,œidœ:œComposioAPI-bIRZIœ,œnameœ:œtoolsœ,œoutput_typesœ:[œToolœ]}", + "target": "Agent-VX80X", + "targetHandle": "{œfieldNameœ:œtoolsœ,œidœ:œAgent-VX80Xœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "tools", + "id": "Agent-VX80X", + "inputTypes": [ + "Tool" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "ComposioAPI", + "id": "ComposioAPI-bIRZI", + "name": "tools", + "output_types": [ + "Tool" + ] + } + }, + "id": "xy-edge__ComposioAPI-bIRZI{œdataTypeœ:œComposioAPIœ,œidœ:œComposioAPI-bIRZIœ,œnameœ:œtoolsœ,œoutput_typesœ:[œToolœ]}-Agent-VX80X{œfieldNameœ:œtoolsœ,œidœ:œAgent-VX80Xœ,œinputTypesœ:[œToolœ],œtypeœ:œotherœ}", + "animated": false, + "className": "" + }, + { + "source": "TextInput-pqkFF", + "sourceHandle": "{œdataTypeœ:œTextInputœ,œidœ:œTextInput-pqkFFœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "Prompt-GSe05", + "targetHandle": "{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-GSe05œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "instructions", + "id": "Prompt-GSe05", + "inputTypes": [ + "Message" + ], + 
"type": "str" + }, + "sourceHandle": { + "dataType": "TextInput", + "id": "TextInput-pqkFF", + "name": "text", + "output_types": [ + "Message" + ] + } + }, + "id": "xy-edge__TextInput-pqkFF{œdataTypeœ:œTextInputœ,œidœ:œTextInput-pqkFFœ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GSe05{œfieldNameœ:œinstructionsœ,œidœ:œPrompt-GSe05œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + }, + { + "source": "Agent-VX80X", + "sourceHandle": "{œdataTypeœ:œAgentœ,œidœ:œAgent-VX80Xœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}", + "target": "OpenAIModel-WX4ZH", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-WX4ZHœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "input_value", + "id": "OpenAIModel-WX4ZH", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "Agent", + "id": "Agent-VX80X", + "name": "response", + "output_types": [ + "Message" + ] + } + }, + "id": "xy-edge__Agent-VX80X{œdataTypeœ:œAgentœ,œidœ:œAgent-VX80Xœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-OpenAIModel-WX4ZH{œfieldNameœ:œinput_valueœ,œidœ:œOpenAIModel-WX4ZHœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "className": "", + "animated": false + }, + { + "source": "ParseData-qQwA0", + "sourceHandle": "{œdataTypeœ:œParseDataœ,œidœ:œParseData-qQwA0œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}", + "target": "Prompt-GSe05", + "targetHandle": "{œfieldNameœ:œreferencesœ,œidœ:œPrompt-GSe05œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "references", + "id": "Prompt-GSe05", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "ParseData", + "id": "ParseData-qQwA0", + "name": "text", + "output_types": [ + "Message" + ] + } + }, + "id": 
"xy-edge__ParseData-qQwA0{œdataTypeœ:œParseDataœ,œidœ:œParseData-qQwA0œ,œnameœ:œtextœ,œoutput_typesœ:[œMessageœ]}-Prompt-GSe05{œfieldNameœ:œreferencesœ,œidœ:œPrompt-GSe05œ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + }, + { + "source": "OpenAIModel-WX4ZH", + "sourceHandle": "{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-WX4ZHœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}", + "target": "URL-ohTze", + "targetHandle": "{œfieldNameœ:œurlsœ,œidœ:œURL-ohTzeœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "urls", + "id": "URL-ohTze", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "OpenAIModel", + "id": "OpenAIModel-WX4ZH", + "name": "text_output", + "output_types": [ + "Message" + ] + } + }, + "id": "xy-edge__OpenAIModel-WX4ZH{œdataTypeœ:œOpenAIModelœ,œidœ:œOpenAIModel-WX4ZHœ,œnameœ:œtext_outputœ,œoutput_typesœ:[œMessageœ]}-URL-ohTze{œfieldNameœ:œurlsœ,œidœ:œURL-ohTzeœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "className": "", + "animated": false + }, + { + "source": "URL-ohTze", + "sourceHandle": "{œdataTypeœ:œURLœ,œidœ:œURL-ohTzeœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}", + "target": "ParseData-qQwA0", + "targetHandle": "{œfieldNameœ:œdataœ,œidœ:œParseData-qQwA0œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "data": { + "targetHandle": { + "fieldName": "data", + "id": "ParseData-qQwA0", + "inputTypes": [ + "Data" + ], + "type": "other" + }, + "sourceHandle": { + "dataType": "URL", + "id": "URL-ohTze", + "name": "data", + "output_types": [ + "Data" + ] + } + }, + "id": "xy-edge__URL-ohTze{œdataTypeœ:œURLœ,œidœ:œURL-ohTzeœ,œnameœ:œdataœ,œoutput_typesœ:[œDataœ]}-ParseData-qQwA0{œfieldNameœ:œdataœ,œidœ:œParseData-qQwA0œ,œinputTypesœ:[œDataœ],œtypeœ:œotherœ}", + "className": "", + "animated": false + }, + { + "source": "Agent-VX80X", + "sourceHandle": "{œdataTypeœ:œAgentœ,œidœ:œAgent-VX80Xœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}", + "target": 
"ChatOutput-ngjfd", + "targetHandle": "{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-ngjfdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "data": { + "targetHandle": { + "fieldName": "input_value", + "id": "ChatOutput-ngjfd", + "inputTypes": [ + "Message" + ], + "type": "str" + }, + "sourceHandle": { + "dataType": "Agent", + "id": "Agent-VX80X", + "name": "response", + "output_types": [ + "Message" + ] + } + }, + "id": "xy-edge__Agent-VX80X{œdataTypeœ:œAgentœ,œidœ:œAgent-VX80Xœ,œnameœ:œresponseœ,œoutput_typesœ:[œMessageœ]}-ChatOutput-ngjfd{œfieldNameœ:œinput_valueœ,œidœ:œChatOutput-ngjfdœ,œinputTypesœ:[œMessageœ],œtypeœ:œstrœ}", + "animated": false, + "className": "" + } + ], + "viewport": { + "x": 237.85542869340588, + "y": -60.19074215375224, + "zoom": 0.4879725590108378 + } + }, + "description": "Auto-generate a customized blog post from instructions and referenced articles.", + "name": "Langlow NoCode JSON File LAB", + "last_tested_version": "1.1.3.dev5", + "endpoint_name": null, + "is_component": false +} \ No newline at end of file diff --git a/Class-Labs/Lab-7(Langflow-Nocodetoll)/readme.md b/Class-Labs/Lab-7(Langflow-Nocodetoll)/readme.md new file mode 100644 index 0000000..e5b014a --- /dev/null +++ b/Class-Labs/Lab-7(Langflow-Nocodetoll)/readme.md @@ -0,0 +1,131 @@ +**"In this lab, we will create an agent that writes blogs for you based on prompts you provide. First, we use the Composio component, which automatically reads the email of the person you enter in the chatbot. If a URL is found in the email body, the agent will extract content from that URL and generate a blog based on the prompt you define. + +To build this, we are using LangFlow, a Node.js-based tool platform. + +Let’s get started!"** + +In the rapidly evolving world of artificial intelligence, the rise of no-code platforms has revolutionized the way we approach AI development. 
LangFlow, a powerful and innovative tool, has emerged as a game-changer in this space, offering a user-friendly and accessible way to create AI applications without the need for extensive coding knowledge. + +## What is Langflow? +LangFlow is a cutting-edge no-code AI ecosystem that enables developers, entrepreneurs, and even non-technical individuals to build AI applications with ease. It provides a visually appealing and intuitive drag-and-drop interface, allowing users to create AI workflows by connecting reusable components. This modular and interactive design fosters rapid experimentation and prototyping, making it an ideal tool for both beginners and experienced AI enthusiasts. + +## Key Features of LangFlow +- One of the standout features of LangFlow is its dynamic input capabilities, which allow for easy customization using curly brackets {}. This flexibility enables users to tailor their AI applications to specific needs and requirements, ensuring that the final product is customized to their unique use case. + +- Another notable aspect of LangFlow is its fine-tuning capabilities, which enable users to maximize the potential of Large Language Models (LLMs). By fine-tuning these models with custom training data from CSV or JSON files, users can create highly specialized AI applications that cater to their specific domains or industries. + +- LangFlow’s Python-native architecture leverages powerful data manipulation and machine learning libraries, ensuring that users have access to cutting-edge tools and technologies. This foundation allows for seamless integration with various tools and stacks, making it easy for teams to incorporate LangFlow into their existing workflows. + +## Explanation of Langflow + +LangFlow simplifies the process of integrating and orchestrating LLMs by offering: + +- **Graphical Interface:** Users can design AI pipelines visually, making it easier to experiment with different model configurations. 
+ +- **Modular Components:** Pre-built components allow seamless integration of APIs, data sources, and AI functionalities. + +- **Customizable Workflows:** Users can define unique workflows tailored to specific use cases, from chatbots to automated content generation. + +- **Support for Multiple LLMs:** LangFlow supports various models, including OpenAI's GPT, Hugging Face models, and others. + +- **Flexibility:** Workflows can be exported and deployed as APIs, making it easy to integrate with existing applications. + +## Use Cases of LangFlow +- LangFlow’s versatility is showcased through its wide range of use cases. One of the most exciting applications is the ability to build local RAG (Retrieval Augmented Generation) chatbots by integrating with embedding models like Ollama. This feature enables the creation of highly personalized and context-aware conversational agents that can engage with users in a natural and intelligent manner. + +- Another compelling use case is the ability to chat with documents in various formats, including PDFs, DOCX, TXT, and websites. This feature allows users to extract valuable insights and information from large volumes of data, making it an ideal tool for research, analysis, and knowledge management. + +- LangFlow also excels in automating workflows and tasks, thanks to its seamless integration with over 5000+ integrations through Zapier. By connecting LangFlow with other tools and platforms, users can streamline their processes, reduce manual effort, and increase efficiency across various domains. + +## Setup the Project + +- Go to the LangFlow page and click on "Get Started for Free", as shown in the image below. + +![Langflow Screenshot](./Images/Screenshot%20(1515).png) + +- Create your account on LangFlow. + +![Langflow Screenshot](./Images/Screenshot%20(1516).png) + +- After creating your account, click on "New Flow". 
+ +![Langflow Screenshot](./Images/Screenshot%20(1517).png) + +- Now click on Blank Flow, as we are building it from scratch. + +![Langflow Screenshot](./Images/Screenshot%20(1518).png) + +- Now, click on the untitled document above, and in the dropdown, click on the Import Option to import the JSON file that has been provided to you. + +![Langflow Screenshot](./Images/Screenshot%20(1520).png) + +- Now, you will see that your project has been successfully imported, and you can view all the agents. + +![Langflow Screenshot](./Images/img1.png) + +- To use the first component, you'll need an API key from Composio. You can obtain a free API key with some initial credits by visiting the following URL: [https://app.composio.dev/developers](https://app.composio.dev/developers). Once you have your key, you can proceed with the integration. + +![Langflow Screenshot](./Images/Screenshot%20(1549).png) + +- Once you have the key, enter it in the designated field and click 'Refresh.' Next, select 'GMAIL' as the app name and choose 'GMAIL_FETCH_EMAIL' as the action to use. + 🔹 Note: You will see that my auth status is green. However, when you run it for the first time, you will need to authenticate it. An authentication option will appear, and you must complete the authentication process before proceeding. + +![Langflow Screenshot](./Images/Refresh%20Button.png) + +![Langflow Screenshot](./Images/Screenshot%20(1551).png) + +- Provide the OpenAI API Key to the agent and the OpenAI API Key component to enable authentication and access. Ensure that the API key is securely stored and used only for authorized requests. + +![Langflow Screenshot](./Images/Screenshot%20(1552).png) + +- In the Text Section Component, click on the text, and you can define the styling of your blog, such as the tone, the heading, etc. + +![Langflow Screenshot](./Images/Screenshot%20(1555).png) + +- Now, from here, you can edit your prompt and click on Check, then Save. 
+ +![Langflow Screenshot](./Images/Screenshot%20(1554).png) + +- Now, click on the Playground Section. + +![Langflow Screenshot](./Images/Screenshot%20(1553).png) + +- In the Playground section, enter a query such as: 'Please provide me the latest email from this particular user.' The system will fetch and display the email body of the specified email. + + Additionally, it will automatically generate a blog by extracting the URL from the email and using its content as a reference. + + Example: + + Input: "Give me the recent email from abcd@gmail.com" + Output: + Fetched Email Body: [Email content] + Generated Blog: [Auto-written blog based on the extracted URL] + +![Langflow Screenshot](./Images/Screenshot%20(1556).png) + +- ⚠️ Note: If no URL is found in the email body, the URL component will fail, and the blog generation will not proceed. + +![Langflow Screenshot](./Images/Screenshot%20(1557).png) + +- The generated blog output: + +![Langflow Screenshot](./Images/Screenshot%20(1558).png) + + + +## Conclusion +LangFlow is a transformative tool that is reshaping the landscape of AI development. By providing a no-code platform that is both powerful and accessible, LangFlow is democratizing AI and empowering individuals from all backgrounds to create innovative solutions. Whether you are an entrepreneur looking to build a chatbot, a researcher seeking to analyze large datasets, or a business owner aiming to automate workflows, LangFlow has something to offer. + +As we look to the future, it is clear that no-code AI development platforms like LangFlow will play a crucial role in driving the widespread adoption of AI technologies. 
By making AI development more accessible and user-friendly, these tools will enable a new generation of innovators to harness the power of AI and create solutions that positively impact the world around us. + + +## Reference Tutorial Video +[Get Started With Langflow](https://youtu.be/LPfstlhSA_w?si=HMYVZ5q60IBJ7H9x) + 