Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -75,13 +75,12 @@ def create_demo():
                     minimum=0.1, maximum=1.0, value=0.95, step=0.05, label="Top-p (nucleus sampling)"
                 )
 
-            # Model Selection Section (
+            # Model Selection Section (no maximum_choices)
             with gr.Row():
                 selected_models = gr.CheckboxGroup(
                     choices=list(model_options.keys()),
                     label="Select two models to compare",
                     value=["Llama-3.1-70B", "Qwen-2.5-1.5B-Instruct"],  # Default models
-                    maximum_choices=2,  # Limit to two models
                 )
 
             # Dynamic Response Section
@@ -107,6 +106,8 @@ def create_demo():
         def generate_responses(
             message, history, system_message, max_tokens, temperature, top_p, selected_models
         ):
+            if len(selected_models) != 2:
+                return "Error: Please select exactly two models to compare."
             responses = respond(
                 message, history, system_message, max_tokens, temperature, top_p, selected_models
             )