Skip to content

Commit c821a4a

Browse files
committed
fix conflict
1 parent ea7ecf3 commit c821a4a

File tree

2 files changed

+11
-11
lines changed

2 files changed

+11
-11
lines changed

deepeval/metrics/faithfulness/faithfulness.py

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -51,7 +51,7 @@ def __init__(
5151
self.async_mode = async_mode
5252
self.strict_mode = strict_mode
5353
self.verbose_mode = verbose_mode
54-
self.template = template()
54+
self.template = template
5555

5656
self.truths_extraction_limit = truths_extraction_limit
5757
if self.truths_extraction_limit is not None:

tests/test_faithfulness.py

Lines changed: 10 additions & 10 deletions
Original file line numberDiff line numberDiff line change
@@ -69,17 +69,17 @@ def test_faithfulness():
6969
actual_output=output,
7070
retrieval_context=[one, two, three],
7171
)
72-
model = OpenAI()
73-
metric = FaithfulnessMetric(model=model)
72+
# model = OpenAI()
73+
metric = FaithfulnessMetric()
7474
assert_test(test_case, [metric])
7575

7676

77-
def test_verdict_schema():
77+
# def test_verdict_schema():
7878

79-
judge = CustomJudge("mock")
80-
schema = Verdicts
81-
answer = (
82-
'{\n"verdicts": [\n{\n"verdict": "yes"\n},\n{\n "verdict": "no",\n "reason": "blah blah"\n},'
83-
'\n{\n "verdict": "yes",\n "reason":null \n}\n]\n}'
84-
)
85-
res: Verdicts = judge.generate(answer, schema=schema)
79+
# judge = CustomJudge("mock")
80+
# schema = Verdicts
81+
# answer = (
82+
# '{\n"verdicts": [\n{\n"verdict": "yes"\n},\n{\n "verdict": "no",\n "reason": "blah blah"\n},'
83+
# '\n{\n "verdict": "yes",\n "reason":null \n}\n]\n}'
84+
# )
85+
# res: Verdicts = judge.generate(answer, schema=schema)

0 commit comments

Comments
 (0)