
Commit

Do not raise exception from OpenAIMetricsCalculator
Lina Tang committed Apr 29, 2024
1 parent 0ec2d2c · commit 5b8b435
Showing 1 changed file with 46 additions and 33 deletions.
src/promptflow-tracing/promptflow/tracing/_openai_utils.py (46 additions, 33 deletions)
@@ -32,7 +32,10 @@ def get_completion_tokens(self, api_call: dict):
         # completion tokens when the output is consumed.
         completion_tokens = 0
         if self._need_collect_metrics(api_call):
-            completion_tokens += self._get_completion_tokens_for_signal_api(api_call)
+            try:
+                completion_tokens += self._get_completion_tokens_for_signal_api(api_call)
+            except Exception as ex:
+                self._log_warning(f"Failed to calculate completion tokens due to exception: {ex}.")
 
         children = api_call.get("children")
         if children is not None:
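
For context, a hedged sketch of what this hunk changes for callers of the public get_completion_tokens API (the api_call payload below is hypothetical; the real span dict is produced by promptflow's tracer):

    from promptflow.tracing._openai_utils import OpenAIMetricsCalculator

    calculator = OpenAIMetricsCalculator()  # assumes the default constructor

    # Hypothetical span dict; the real structure comes from promptflow's tracer.
    api_call = {"name": "openai_chat_call", "output": object(), "children": None}

    # Before this commit, a malformed payload could raise out of the calculator;
    # now the error is logged as a warning and traversal of the trace continues.
    tokens = calculator.get_completion_tokens(api_call)
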
@@ -115,21 +118,26 @@ def _try_get_model(self, inputs, output):
 
     def get_openai_metrics_for_chat_api(self, inputs, output):
         metrics = {}
-        enc, tokens_per_message, tokens_per_name = self._get_encoding_for_chat_api(self._try_get_model(inputs, output))
-        metrics["prompt_tokens"] = self._get_prompt_tokens_from_messages(
-            inputs["messages"], enc, tokens_per_message, tokens_per_name
-        )
-        if isinstance(output, list):
-            if IS_LEGACY_OPENAI:
-                metrics["completion_tokens"] = len(output)
-            else:
-                metrics["completion_tokens"] = len(
-                    [chunk for chunk in output if chunk.choices and chunk.choices[0].delta.content]
-                )
-        else:
-            metrics["completion_tokens"] = self._get_completion_tokens_for_chat_api(output, enc)
-        metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
-        return metrics
+        try:
+            enc, tokens_per_message, tokens_per_name = self._get_encoding_for_chat_api(
+                self._try_get_model(inputs, output)
+            )
+            metrics["prompt_tokens"] = self._get_prompt_tokens_from_messages(
+                inputs["messages"], enc, tokens_per_message, tokens_per_name
+            )
+            if isinstance(output, list):
+                if IS_LEGACY_OPENAI:
+                    metrics["completion_tokens"] = len(output)
+                else:
+                    metrics["completion_tokens"] = len(
+                        [chunk for chunk in output if chunk.choices and chunk.choices[0].delta.content]
+                    )
+            else:
+                metrics["completion_tokens"] = self._get_completion_tokens_for_chat_api(output, enc)
+            metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+            return metrics
+        except Exception as ex:
+            self._log_warning(f"Failed to calculate metrics due to exception: {ex}.")
 
     def _get_encoding_for_chat_api(self, model):
         try:
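
One behavioral consequence worth noting: when the new except branch is hit, the method falls through without a return statement, so callers now receive None instead of a metrics dict. A hedged caller-side sketch (continuing the sketch above; inputs and output stand in for the traced call's arguments and result):

    metrics = calculator.get_openai_metrics_for_chat_api(inputs, output)
    if metrics is None:
        # Calculation failed; the calculator already logged a warning.
        metrics = {"prompt_tokens": 0, "completion_tokens": 0, "total_tokens": 0}
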
@@ -143,6 +151,8 @@ def _get_encoding_for_chat_api(self, model):
             tokens_per_message = 3
             tokens_per_name = 1
         else:
+            tokens_per_message = 3
+            tokens_per_name = 1
             self._log_warning(f"Calculating metrics for model {model} is not supported.")
         return enc, tokens_per_message, tokens_per_name
 
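The two added default lines do more than accompany the warning: without them, an unrecognized model would leave tokens_per_message and tokens_per_name unbound, and the return statement itself would raise UnboundLocalError. A standalone sketch of the fallback path, assuming tiktoken is installed and that the method's encoding lookup (elided above) falls back to cl100k_base (the model name is made up):

    import tiktoken

    model = "my-custom-model"  # hypothetical, not a model tiktoken knows
    try:
        enc = tiktoken.encoding_for_model(model)  # raises KeyError for unknown models
    except KeyError:
        enc = tiktoken.get_encoding("cl100k_base")  # assumed fallback encoding
    # With this commit, unknown models also get the gpt-3.5/gpt-4 style defaults:
    tokens_per_message, tokens_per_name = 3, 1
    print(len(enc.encode("hello world")), tokens_per_message, tokens_per_name)
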
@@ -172,25 +182,28 @@ def _get_completion_tokens_for_chat_api(self, output, enc):
 
     def get_openai_metrics_for_completion_api(self, inputs, output):
         metrics = {}
-        enc = self._get_encoding_for_completion_api(self._try_get_model(inputs, output))
-        metrics["prompt_tokens"] = 0
-        prompt = inputs.get("prompt")
-        if isinstance(prompt, str):
-            metrics["prompt_tokens"] = len(enc.encode(prompt))
-        elif isinstance(prompt, list):
-            for pro in prompt:
-                metrics["prompt_tokens"] += len(enc.encode(pro))
-        if isinstance(output, list):
-            if IS_LEGACY_OPENAI:
-                metrics["completion_tokens"] = len(output)
-            else:
-                metrics["completion_tokens"] = len(
-                    [chunk for chunk in output if chunk.choices and chunk.choices[0].text]
-                )
-        else:
-            metrics["completion_tokens"] = self._get_completion_tokens_for_completion_api(output, enc)
-        metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
-        return metrics
+        try:
+            enc = self._get_encoding_for_completion_api(self._try_get_model(inputs, output))
+            metrics["prompt_tokens"] = 0
+            prompt = inputs.get("prompt")
+            if isinstance(prompt, str):
+                metrics["prompt_tokens"] = len(enc.encode(prompt))
+            elif isinstance(prompt, list):
+                for pro in prompt:
+                    metrics["prompt_tokens"] += len(enc.encode(pro))
+            if isinstance(output, list):
+                if IS_LEGACY_OPENAI:
+                    metrics["completion_tokens"] = len(output)
+                else:
+                    metrics["completion_tokens"] = len(
+                        [chunk for chunk in output if chunk.choices and chunk.choices[0].text]
+                    )
+            else:
+                metrics["completion_tokens"] = self._get_completion_tokens_for_completion_api(output, enc)
+            metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+            return metrics
+        except Exception as ex:
+            self._log_warning(f"Failed to calculate metrics due to exception: {ex}.")
 
     def _get_encoding_for_completion_api(self, model):
         try:
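
The completion-API hunk mirrors the chat-API one, including the implicit None return on failure. The prompt-token arithmetic it wraps can be reproduced standalone (a sketch assuming tiktoken; the prompt value is illustrative):

    import tiktoken

    enc = tiktoken.get_encoding("cl100k_base")
    prompt = ["first prompt", "second prompt"]  # the completions API accepts str or list
    prompt_tokens = 0
    if isinstance(prompt, str):
        prompt_tokens = len(enc.encode(prompt))
    elif isinstance(prompt, list):
        for p in prompt:
            prompt_tokens += len(enc.encode(p))
    print(prompt_tokens)  # token counts for both prompts, summed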
