diff --git a/fastchat/serve/gradio_block_arena_anony.py b/fastchat/serve/gradio_block_arena_anony.py
index 803ef012d..6797290ba 100644
--- a/fastchat/serve/gradio_block_arena_anony.py
+++ b/fastchat/serve/gradio_block_arena_anony.py
@@ -185,7 +185,6 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Re
     "chatglm3-6b": 2,
     # tier 1
     "deluxe-chat-v1.2": 2,
-    "palm-2": 1.5,
     "llama-2-70b-chat": 1.5,
     "llama-2-13b-chat": 1.5,
     "codellama-34b-instruct": 1.5,
@@ -215,6 +214,7 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Re
     "llama-13b": 0.1,
     "chatglm-6b": 0.5,
     "deluxe-chat-v1": 4,
+    "palm-2": 1.5,
 }
 
 # target model sampling weights will be boosted.
@@ -286,7 +286,6 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Re
     "vicuna-13b": {"llama-2-13b-chat", "llama-2-70b-chat"},
     "vicuna-7b": {"llama-2-7b-chat", "mistral-7b-instruct", "llama-2-13b-chat"},
     "wizardlm-70b": {"gpt-3.5-turbo-0613", "vicuna-33b", "claude-instant-1"},
-    "palm-2": {"llama-2-13b-chat", "gpt-3.5-turbo-0613"},
 }
 
 SAMPLING_BOOST_MODELS = [
@@ -305,11 +304,7 @@ def share_click(state0, state1, model_selector0, model_selector1, request: gr.Re
 ]
 
 # outage models won't be sampled.
-OUTAGE_MODELS = [
-    "zephyr-7b-alpha",
-    "falcon-180b-chat",
-    "palm-2",
-]
+OUTAGE_MODELS = []
 
 
 def get_sample_weight(model):
diff --git a/fastchat/serve/gradio_web_server.py b/fastchat/serve/gradio_web_server.py
index 15d56a283..9642ce1b4 100644
--- a/fastchat/serve/gradio_web_server.py
+++ b/fastchat/serve/gradio_web_server.py
@@ -141,14 +141,19 @@ def get_model_list(
         models += list(openai_compatible_models_info.keys())
 
     if add_chatgpt:
-        models += ["gpt-3.5-turbo-0613", "gpt-3.5-turbo-1106"]
+        models += [
+            "gpt-4-0314",
+            "gpt-4-0613",
+            "gpt-3.5-turbo-0613",
+            "gpt-3.5-turbo-1106",
+        ]
     if add_claude:
-        models += ["claude-2.0", "claude-2.1", "claude-instant-1"]
+        models += ["claude-2.1", "claude-2.0", "claude-instant-1"]
     if add_palm:
         models += ["gemini-pro"]
     models = list(set(models))
-    # hidden_models = ["deluxe-chat-v1.2", "gpt-4-0613"]
-    hidden_models = ["gpt-4-0613"]
+
+    hidden_models = ["gpt-4-0314", "gpt-4-0613"]
     for hm in hidden_models:
         del models[models.index(hm)]
 
diff --git a/fastchat/serve/gradio_web_server_multi.py b/fastchat/serve/gradio_web_server_multi.py
index 389daf93f..0009c02ad 100644
--- a/fastchat/serve/gradio_web_server_multi.py
+++ b/fastchat/serve/gradio_web_server_multi.py
@@ -85,9 +85,9 @@ def load_demo(url_params, request: gr.Request):
     # Only enable these models in anony battles.
     if args.add_chatgpt:
         models_anony += [
-            "gpt-4",
-            "gpt-3.5-turbo",
-            "gpt-4-turbo",
+            "gpt-4-0314",
+            "gpt-4-0613",
+            "gpt-3.5-turbo-0613",
             "gpt-3.5-turbo-1106",
         ]
     if args.add_claude:
@@ -95,13 +95,9 @@ def load_demo(url_params, request: gr.Request):
     if args.add_palm:
         models_anony += ["gemini-pro"]
     anony_only_models = [
-        "deluxe-chat-v1.2",
         "claude-1",
-        "claude-2.0",
-        "claude-2.1",
-        "claude-instant-1",
-        "gpt-4-0613",
         "gpt-4-0314",
+        "gpt-4-0613",
     ]
     for mdl in anony_only_models:
         models_anony.append(mdl)
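
For context, a minimal sketch of how the tables edited above plausibly combine at sampling time, ending where the first file's last hunk does (`def get_sample_weight`). The weight-1.0 fallback for unlisted models and the boost multiplier of 5 are illustrative assumptions, not values taken from this patch; the table contents mirror the post-patch state (palm-2 restored to SAMPLING_WEIGHTS, OUTAGE_MODELS emptied).

import random

# Abbreviated stand-ins for the module-level tables this patch edits.
SAMPLING_WEIGHTS = {"llama-2-70b-chat": 1.5, "deluxe-chat-v1": 4, "palm-2": 1.5}
SAMPLING_BOOST_MODELS = ["deluxe-chat-v1"]
OUTAGE_MODELS = []

BOOST_FACTOR = 5  # hypothetical multiplier, not taken from this patch


def get_sample_weight(model):
    # Outage models are never sampled; unlisted models get a default weight.
    if model in OUTAGE_MODELS:
        return 0
    weight = SAMPLING_WEIGHTS.get(model, 1.0)
    if model in SAMPLING_BOOST_MODELS:
        weight *= BOOST_FACTOR
    return weight


# Draw two anonymous-battle contestants in proportion to their weights.
# The real arena additionally deduplicates the pair and biases pairings
# with BATTLE_TARGETS; this sketch omits both.
models = list(SAMPLING_WEIGHTS)
weights = [get_sample_weight(m) for m in models]
print(random.choices(models, weights=weights, k=2))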