daiqi committed
Commit f8f2c33 · verified · 1 Parent(s): bc8199b

Update app.py

Files changed (1): app.py (+134 -81)
app.py CHANGED
@@ -108,6 +108,13 @@ def init_leaderboard(dataframe):
     )
 
 
+# =================test
+if os.path.exists("./text.txt"):
+    print(open("./text.txt").read())
+else:
+    print("not exists")
+
+
 demo = gr.Blocks(css=custom_css)
 with demo:
     gr.HTML(TITLE)
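The hunk above adds a startup probe: if `./text.txt` exists when the Space boots, its contents are printed to the logs, otherwise "not exists" is printed. Since that file is written by the new submission handler in the next hunk, this presumably checks whether a submission survives a restart (a Space's local filesystem is ephemeral unless persistent storage is attached). A minimal sketch of the same probe with the handle closed deterministically, assuming `os` is already imported at the top of app.py (the hunk itself relies on that too):

```python
import os

# Startup probe: print the submission file if it survived a restart.
if os.path.exists("./text.txt"):
    with open("./text.txt", encoding="utf-8") as f:  # close the handle explicitly
        print(f.read())
else:
    print("not exists")
```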
@@ -125,88 +132,134 @@ with demo:
             with gr.Column():
                 with gr.Row():
                     gr.Markdown(EVALUATION_QUEUE_TEXT, elem_classes="markdown-text")
 
-            with gr.Column():
-                with gr.Accordion(
-                    f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
-                    open=False,
-                ):
-                    with gr.Row():
-                        finished_eval_table = gr.components.Dataframe(
-                            value=finished_eval_queue_df,
-                            headers=EVAL_COLS,
-                            datatype=EVAL_TYPES,
-                            row_count=5,
-                        )
-                with gr.Accordion(
-                    f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
-                    open=False,
-                ):
-                    with gr.Row():
-                        running_eval_table = gr.components.Dataframe(
-                            value=running_eval_queue_df,
-                            headers=EVAL_COLS,
-                            datatype=EVAL_TYPES,
-                            row_count=5,
-                        )
-
-                with gr.Accordion(
-                    f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
-                    open=False,
-                ):
-                    with gr.Row():
-                        pending_eval_table = gr.components.Dataframe(
-                            value=pending_eval_queue_df,
-                            headers=EVAL_COLS,
-                            datatype=EVAL_TYPES,
-                            row_count=5,
-                        )
-            with gr.Row():
-                gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
-
-            with gr.Row():
-                with gr.Column():
-                    model_name_textbox = gr.Textbox(label="Model name")
-                    revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
-                    model_type = gr.Dropdown(
-                        choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
-                        label="Model type",
-                        multiselect=False,
-                        value=None,
-                        interactive=True,
-                    )
-
-                with gr.Column():
-                    precision = gr.Dropdown(
-                        choices=[i.value.name for i in Precision if i != Precision.Unknown],
-                        label="Precision",
-                        multiselect=False,
-                        value="float16",
-                        interactive=True,
-                    )
-                    weight_type = gr.Dropdown(
-                        choices=[i.value.name for i in WeightType],
-                        label="Weights type",
-                        multiselect=False,
-                        value="Original",
-                        interactive=True,
-                    )
-                    base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
-
-            submit_button = gr.Button("Submit Eval")
-            submission_result = gr.Markdown()
-            submit_button.click(
-                add_new_eval,
-                [
-                    model_name_textbox,
-                    base_model_name_textbox,
-                    revision_name_textbox,
-                    precision,
-                    weight_type,
-                    model_type,
-                ],
-                submission_result,
-            )
+            with gr.Column():
+                with gr.Row():
+                    score_input = gr.Textbox(label="Score (float)", placeholder="Enter a score")
+                    name_input = gr.Textbox(label="Name (str)", placeholder="Enter a name")
+                    base_model_input = gr.Textbox(label="BaseModel (str)", placeholder="Enter the base model name")
+
+                with gr.Row():
+                    env_dropdown = gr.Dropdown(
+                        choices=["Sokoban", "Football", "WebUI"],
+                        label="Env.",
+                        value="Sokoban"
+                    )
+                    target_research_dropdown = gr.Dropdown(
+                        choices=["Model-Eval-Online", "Model-Eval-Global"],
+                        label="Target-research",
+                        value="Model-Eval-Online"
+                    )
+                    subset_dropdown = gr.Dropdown(
+                        choices=["mini", "all"],
+                        label="Subset",
+                        value="mini"
+                    )
+
+                link_input = gr.Textbox(label="Link (str)", placeholder="Enter a link")
+
+                submit_button = gr.Button("Submit Eval")
+                submission_result = gr.Markdown()
+
+                def submit_eval(score, name, base_model, env, target_research, subset, link):
+                    # Handle the submission here; the info could be saved to a database or processed further
+                    result = (
+                        f"Score: {score}\n"
+                        f"Name: {name}\n"
+                        f"BaseModel: {base_model}\n"
+                        f"Env: {env}\n"
+                        f"Target-research: {target_research}\n"
+                        f"Subset: {subset}\n"
+                        f"Link: {link}"
+                    )
+                    open("./text.txt", "w").write(result)
+                    return result
+
+                submit_button.click(
+                    submit_eval,
+                    [score_input, name_input, base_model_input, env_dropdown, target_research_dropdown, subset_dropdown, link_input],
+                    submission_result
+                )
+            # with gr.Column():
+            #     with gr.Accordion(
+            #         f"✅ Finished Evaluations ({len(finished_eval_queue_df)})",
+            #         open=False,
+            #     ):
+            #         with gr.Row():
+            #             finished_eval_table = gr.components.Dataframe(
+            #                 value=finished_eval_queue_df,
+            #                 headers=EVAL_COLS,
+            #                 datatype=EVAL_TYPES,
+            #                 row_count=5,
+            #             )
+            #     with gr.Accordion(
+            #         f"🔄 Running Evaluation Queue ({len(running_eval_queue_df)})",
+            #         open=False,
+            #     ):
+            #         with gr.Row():
+            #             running_eval_table = gr.components.Dataframe(
+            #                 value=running_eval_queue_df,
+            #                 headers=EVAL_COLS,
+            #                 datatype=EVAL_TYPES,
+            #                 row_count=5,
+            #             )
+
+            #     with gr.Accordion(
+            #         f"⏳ Pending Evaluation Queue ({len(pending_eval_queue_df)})",
+            #         open=False,
+            #     ):
+            #         with gr.Row():
+            #             pending_eval_table = gr.components.Dataframe(
+            #                 value=pending_eval_queue_df,
+            #                 headers=EVAL_COLS,
+            #                 datatype=EVAL_TYPES,
+            #                 row_count=5,
+            #             )
+            # with gr.Row():
+            #     gr.Markdown("# ✉️✨ Submit your model here!", elem_classes="markdown-text")
+
+            # with gr.Row():
+            #     with gr.Column():
+            #         model_name_textbox = gr.Textbox(label="Model name")
+            #         revision_name_textbox = gr.Textbox(label="Revision commit", placeholder="main")
+            #         model_type = gr.Dropdown(
+            #             choices=[t.to_str(" : ") for t in ModelType if t != ModelType.Unknown],
+            #             label="Model type",
+            #             multiselect=False,
+            #             value=None,
+            #             interactive=True,
+            #         )
+
+            #     with gr.Column():
+            #         precision = gr.Dropdown(
+            #             choices=[i.value.name for i in Precision if i != Precision.Unknown],
+            #             label="Precision",
+            #             multiselect=False,
+            #             value="float16",
+            #             interactive=True,
+            #         )
+            #         weight_type = gr.Dropdown(
+            #             choices=[i.value.name for i in WeightType],
+            #             label="Weights type",
+            #             multiselect=False,
+            #             value="Original",
+            #             interactive=True,
+            #         )
+            #         base_model_name_textbox = gr.Textbox(label="Base model (for delta or adapter weights)")
+
+            # submit_button = gr.Button("Submit Eval")
+            # submission_result = gr.Markdown()
+            # submit_button.click(
+            #     add_new_eval,
+            #     [
+            #         model_name_textbox,
+            #         base_model_name_textbox,
+            #         revision_name_textbox,
+            #         precision,
+            #         weight_type,
+            #         model_type,
+            #     ],
+            #     submission_result,
+            # )
 
             with gr.Row():
                 with gr.Accordion("📙 Citation", open=False):