From 5950ed27490bbfd0a970592a775c14c19f4d5d56 Mon Sep 17 00:00:00 2001
From: octo-patch
Date: Wed, 8 Apr 2026 23:13:35 +0800
Subject: [PATCH] fix: resolve ZeroDivisionError and NameError in
 OpenAICloseSetClsEvaluator.print_results

---
 pointllm/eval/evaluator.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/pointllm/eval/evaluator.py b/pointllm/eval/evaluator.py
index 5a72358..bdbe547 100644
--- a/pointllm/eval/evaluator.py
+++ b/pointllm/eval/evaluator.py
@@ -399,7 +399,7 @@ def parse_gpt_response_evaluate(self, gpt_response, ground_truth):
                 # * not valid range
                 cls_result = -1
         except ValueError:
-            print(f"Error: unale to parse {gpt_response}.")
+            print(f"Error: unable to parse {gpt_response}.")
             cls_result = -1
 
         if cls_result == -1:
@@ -562,10 +562,10 @@ def print_results(self):
         print('-' * 80)
         if self.total_predictions - self.invalid_responses == 0:
             accuracy = 0 # * no results and get error
+            clean_accuracy = 0
         else:
             accuracy = self.correct_predictions / self.total_predictions * 100
             clean_accuracy = (self.correct_predictions - self.invalid_correct_predictions) / (self.total_predictions - self.invalid_responses) * 100
-        accuracy = self.correct_predictions / self.total_predictions * 100
         print("Results:")
         print(f"Accuracy: {accuracy:.2f}%")
         print(f"Clean Accuracy: {clean_accuracy:.2f}%",)
@@ -638,7 +638,7 @@ def parse_gpt_response_evaluate(self, gpt_response, ground_truth):
                 # * not valid range
                 gpt_score = -1
         except ValueError:
-            print(f"Error: unale to parse {gpt_response}.")
+            print(f"Error: unable to parse {gpt_response}.")
             gpt_score = -1
 
         if gpt_score == -1: