Commit
•
9fd2174
1
Parent(s):
f87a65a
Fix logging clear behavior
Browse files
chat_interface_preference.py +15 -11
chat_interface_preference.py
CHANGED
@@ -40,6 +40,7 @@ from huggingface_hub import CommitScheduler
|
|
40 |
pattern = re.compile(r'<div class="message-identifier">(.*?)</div>', re.DOTALL)
|
41 |
|
42 |
PREFERENCE_TECHNIQUE_MAPPING = {"sft": "prompt", "dpo": "preference", "kto": "vibes"}
|
|
|
43 |
|
44 |
|
45 |
@document()
|
@@ -685,7 +686,7 @@ class ChatInterface(Blocks):
|
|
685 |
else:
|
686 |
response = None
|
687 |
if self._check_if_two_responses(response):
|
688 |
-
Info(
|
689 |
return history, history
|
690 |
else:
|
691 |
inputs, _, _ = special_args(self.fn, inputs=[message, history, *args], request=request)
|
@@ -730,7 +731,7 @@ class ChatInterface(Blocks):
|
|
730 |
else:
|
731 |
response = None
|
732 |
if self._check_if_two_responses(response):
|
733 |
-
Info(
|
734 |
yield history, history
|
735 |
else:
|
736 |
inputs, _, _ = special_args(self.fn, inputs=[message, history, *args], request=request)
|
@@ -815,19 +816,18 @@ class ChatInterface(Blocks):
|
|
815 |
list[list[str | tuple | None]],
|
816 |
]:
|
817 |
self._check_num_turns(history, generate=False)
|
|
|
818 |
history_as_openai_format = self._get_conversation_in_openai_format(history)
|
819 |
feedback = {"prompt": history_as_openai_format}
|
820 |
-
|
821 |
-
prompt, response = history[-1]
|
822 |
matches = self._check_if_two_responses(response)
|
|
|
823 |
if matches and log != "prompt":
|
824 |
option_a, option_b = matches[0], matches[1]
|
825 |
if log == "a":
|
826 |
chosen, rejected = option_a, option_b
|
827 |
-
Info("Logged preference: a")
|
828 |
elif log == "b":
|
829 |
chosen, rejected = option_b, option_a
|
830 |
-
Info("Logged preference: b")
|
831 |
elif log == "ab":
|
832 |
options = [option_a, option_b]
|
833 |
chosen, rejected = random.choice([options])
|
@@ -839,23 +839,26 @@ class ChatInterface(Blocks):
|
|
839 |
"rejected": [{"content": rejected, "role": "assistant"}],
|
840 |
}
|
841 |
)
|
|
|
842 |
self._save_feedback(feedback)
|
|
|
843 |
elif log == "ab":
|
844 |
self._save_feedback(feedback)
|
845 |
history[-1] = [prompt, chosen]
|
846 |
return history, message or "", history
|
847 |
elif log in ["conversation", "good", "bad"]:
|
848 |
-
feedback.update({"response": response})
|
849 |
if log == "good":
|
850 |
feedback.update({"label": True})
|
851 |
elif log == "bad":
|
852 |
feedback.update({"label": False})
|
853 |
-
Info("Logged conversation")
|
854 |
-
self._save_feedback(feedback)
|
855 |
|
|
|
|
|
856 |
return history, "", history
|
857 |
else:
|
858 |
-
|
|
|
|
|
859 |
|
860 |
async def _examples_fn(self, message: str, *args) -> list[list[str | None]]:
|
861 |
inputs, _, _ = special_args(self.fn, inputs=[message, [], *args], request=None)
|
@@ -907,7 +910,8 @@ class ChatInterface(Blocks):
|
|
907 |
if history:
|
908 |
_, response = history[-1]
|
909 |
if self._check_if_two_responses(response):
|
910 |
-
|
|
|
911 |
else:
|
912 |
await self._log_fn(message=message, history=history, log="prompt")
|
913 |
self._set_conversation_id()
|
|
|
40 |
pattern = re.compile(r'<div class="message-identifier">(.*?)</div>', re.DOTALL)
|
41 |
|
42 |
PREFERENCE_TECHNIQUE_MAPPING = {"sft": "prompt", "dpo": "preference", "kto": "vibes"}
|
43 |
+
_LOG_REQUIRED_MESSAGE = "First, provide preference, undo or clear to continue conversation."
|
44 |
|
45 |
|
46 |
@document()
|
|
|
686 |
else:
|
687 |
response = None
|
688 |
if self._check_if_two_responses(response):
|
689 |
+
Info(_LOG_REQUIRED_MESSAGE)
|
690 |
return history, history
|
691 |
else:
|
692 |
inputs, _, _ = special_args(self.fn, inputs=[message, history, *args], request=request)
|
|
|
731 |
else:
|
732 |
response = None
|
733 |
if self._check_if_two_responses(response):
|
734 |
+
Info(_LOG_REQUIRED_MESSAGE)
|
735 |
yield history, history
|
736 |
else:
|
737 |
inputs, _, _ = special_args(self.fn, inputs=[message, history, *args], request=request)
|
|
|
816 |
list[list[str | tuple | None]],
|
817 |
]:
|
818 |
self._check_num_turns(history, generate=False)
|
819 |
+
prompt, response = history[-1]
|
820 |
history_as_openai_format = self._get_conversation_in_openai_format(history)
|
821 |
feedback = {"prompt": history_as_openai_format}
|
822 |
+
feedback.update({"response": [{"content": response, "role": "assistant"}]})
|
|
|
823 |
matches = self._check_if_two_responses(response)
|
824 |
+
|
825 |
if matches and log != "prompt":
|
826 |
option_a, option_b = matches[0], matches[1]
|
827 |
if log == "a":
|
828 |
chosen, rejected = option_a, option_b
|
|
|
829 |
elif log == "b":
|
830 |
chosen, rejected = option_b, option_a
|
|
|
831 |
elif log == "ab":
|
832 |
options = [option_a, option_b]
|
833 |
chosen, rejected = random.choice([options])
|
|
|
839 |
"rejected": [{"content": rejected, "role": "assistant"}],
|
840 |
}
|
841 |
)
|
842 |
+
feedback["response"] = [{"content": chosen, "role": "assistant"}]
|
843 |
self._save_feedback(feedback)
|
844 |
+
Info("Logged succesfully")
|
845 |
elif log == "ab":
|
846 |
self._save_feedback(feedback)
|
847 |
history[-1] = [prompt, chosen]
|
848 |
return history, message or "", history
|
849 |
elif log in ["conversation", "good", "bad"]:
|
|
|
850 |
if log == "good":
|
851 |
feedback.update({"label": True})
|
852 |
elif log == "bad":
|
853 |
feedback.update({"label": False})
|
|
|
|
|
854 |
|
855 |
+
self._save_feedback(feedback)
|
856 |
+
Info("Logged succesfully")
|
857 |
return history, "", history
|
858 |
else:
|
859 |
+
self._save_feedback(feedback)
|
860 |
+
Info("Logged succesfully")
|
861 |
+
return history, "", history
|
862 |
|
863 |
async def _examples_fn(self, message: str, *args) -> list[list[str | None]]:
|
864 |
inputs, _, _ = special_args(self.fn, inputs=[message, [], *args], request=None)
|
|
|
910 |
if history:
|
911 |
_, response = history[-1]
|
912 |
if self._check_if_two_responses(response):
|
913 |
+
Info(_LOG_REQUIRED_MESSAGE)
|
914 |
+
return history, message or "", history
|
915 |
else:
|
916 |
await self._log_fn(message=message, history=history, log="prompt")
|
917 |
self._set_conversation_id()
|