Do not raise exception from OpenAIMetricsCalculator
Lina Tang committed Apr 29, 2024
1 parent 0ec2d2c commit 0c8d9ae
Showing 1 changed file with 50 additions and 39 deletions.
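
The shape of the change: internal helpers now raise on unrecoverable states, while the public entry points (`get_completion_tokens`, `get_openai_metrics_for_chat_api`, `get_openai_metrics_for_completion_api`) wrap their bodies in try/except and downgrade any failure to a logged warning, so metrics calculation can never crash the traced user code. A minimal sketch of that contract (class and method names here are illustrative, not the promptflow source):

```python
import logging
from typing import Optional

logger = logging.getLogger(__name__)


class MetricsCalculatorSketch:
    """Illustrative only: mirrors the error-handling contract, not promptflow's code."""

    def _try_get_model(self, inputs: dict) -> str:
        # Helpers raise, so one except block at the boundary covers every failure mode.
        model = inputs.get("model")
        if not model:
            raise Exception("Cannot get a valid model to calculate metrics.")
        return model

    def get_metrics(self, inputs: dict) -> Optional[dict]:
        # Public entry point: no exception ever escapes to the caller.
        try:
            model = self._try_get_model(inputs)
            return {"model": model}
        except Exception as ex:
            logger.warning(f"Failed to calculate metrics due to exception: {ex}.")
            return None
```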
src/promptflow-tracing/promptflow/tracing/_openai_utils.py
```diff
@@ -32,7 +32,10 @@ def get_completion_tokens(self, api_call: dict):
         # completion tokens when the output is consumed.
         completion_tokens = 0
         if self._need_collect_metrics(api_call):
-            completion_tokens += self._get_completion_tokens_for_signal_api(api_call)
+            try:
+                completion_tokens += self._get_completion_tokens_for_signal_api(api_call)
+            except Exception as ex:
+                self._log_warning(f"Failed to calculate completion tokens due to exception: {ex}.")
 
         children = api_call.get("children")
         if children is not None:
```
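
With this guard in place, a node whose token count cannot be computed contributes zero tokens while its children are still visited. A simplified, self-contained sketch of that walk (it reads usage dicts directly rather than going through promptflow's helpers, and the `api_call` payloads are invented):

```python
def count_completion_tokens(api_call: dict) -> int:
    # Simplified stand-in for get_completion_tokens: swallow per-node failures,
    # keep walking the children.
    total = 0
    try:
        total += api_call["output"]["usage"]["completion_tokens"]
    except Exception as ex:
        print(f"Failed to calculate completion tokens due to exception: {ex}.")
    for child in api_call.get("children") or []:
        total += count_completion_tokens(child)
    return total


tree = {
    "output": "not-a-usage-payload",  # this node raises, but the walk survives it
    "children": [{"output": {"usage": {"completion_tokens": 7}}, "children": None}],
}
assert count_completion_tokens(tree) == 7
```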
```diff
@@ -68,7 +71,7 @@ def _get_openai_metrics_for_signal_api(self, api_call: dict):
         if isinstance(usage, dict):
             return usage
         self._log_warning(
-            "Cannot find openai metrics in output, " "will calculate metrics from response data directly."
+            "Cannot find openai metrics in output, will calculate metrics from response data directly."
         )
 
         name = api_call.get("name")
```
```diff
@@ -84,13 +87,13 @@ def _get_openai_metrics_for_signal_api(self, api_call: dict):
         elif name == "openai_completion_legacy" or name == "openai_completion":  # openai v1
             return self.get_openai_metrics_for_completion_api(inputs, output)
         else:
-            self._log_warning(f"Calculating metrics for api {name} is not supported.")
+            raise Exception(f"Calculating metrics for api {name} is not supported.")
 
     def _try_get_model(self, inputs, output):
         if IS_LEGACY_OPENAI:
             api_type = inputs.get("api_type")
             if not api_type:
-                self._log_warning("Cannot calculate metrics for none or empty api_type.")
+                raise Exception("Cannot calculate metrics for none or empty api_type.")
             if api_type == "azure":
                 model = inputs.get("engine")
             else:
```
```diff
@@ -107,29 +110,34 @@ def _try_get_model(self, inputs, output):
         if not model:
             model = inputs.get("model")
         if not model:
-            raise self._log_warning(
-                "Cannot get a valid model to calculate metrics. "
+            raise Exception(
+                "Cannot get a valid model to calculate metrics."
                 "Please specify a engine for AzureOpenAI API or a model for OpenAI API."
             )
         return model
 
     def get_openai_metrics_for_chat_api(self, inputs, output):
         metrics = {}
-        enc, tokens_per_message, tokens_per_name = self._get_encoding_for_chat_api(self._try_get_model(inputs, output))
-        metrics["prompt_tokens"] = self._get_prompt_tokens_from_messages(
-            inputs["messages"], enc, tokens_per_message, tokens_per_name
-        )
-        if isinstance(output, list):
-            if IS_LEGACY_OPENAI:
-                metrics["completion_tokens"] = len(output)
-            else:
-                metrics["completion_tokens"] = len(
-                    [chunk for chunk in output if chunk.choices and chunk.choices[0].delta.content]
-                )
-        else:
-            metrics["completion_tokens"] = self._get_completion_tokens_for_chat_api(output, enc)
-        metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
-        return metrics
+        try:
+            enc, tokens_per_message, tokens_per_name = self._get_encoding_for_chat_api(
+                self._try_get_model(inputs, output)
+            )
+            metrics["prompt_tokens"] = self._get_prompt_tokens_from_messages(
+                inputs["messages"], enc, tokens_per_message, tokens_per_name
+            )
+            if isinstance(output, list):
+                if IS_LEGACY_OPENAI:
+                    metrics["completion_tokens"] = len(output)
+                else:
+                    metrics["completion_tokens"] = len(
+                        [chunk for chunk in output if chunk.choices and chunk.choices[0].delta.content]
+                    )
+            else:
+                metrics["completion_tokens"] = self._get_completion_tokens_for_chat_api(output, enc)
+            metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+            return metrics
+        except Exception as ex:
+            self._log_warning(f"Failed to calculate metrics due to exception: {ex}.")
 
     def _get_encoding_for_chat_api(self, model):
         try:
```
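
Two details of the rewritten chat method are worth noting. First, because `return metrics` now sits inside the `try`, any failure falls through to the `except` branch and the method implicitly returns `None`; callers see a warning in the log rather than an exception. Second, for streamed output (a list of chunks), completion tokens are approximated by counting chunks that actually carry content, one token per delta. A small stub demonstrating that count (the chunk shape only loosely mimics an openai v1 streaming response):

```python
from types import SimpleNamespace


def chunk(content):
    # Build an object with the attribute path the list comprehension expects.
    return SimpleNamespace(choices=[SimpleNamespace(delta=SimpleNamespace(content=content))])


stream = [chunk("Hel"), chunk("lo"), chunk(None)]  # final delta carries no content
completion_tokens = len([c for c in stream if c.choices and c.choices[0].delta.content])
assert completion_tokens == 2
```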
```diff
@@ -143,7 +151,7 @@ def _get_encoding_for_chat_api(self, model):
             tokens_per_message = 3
             tokens_per_name = 1
         else:
-            self._log_warning(f"Calculating metrics for model {model} is not supported.")
+            raise Exception(f"Calculating metrics for model {model} is not supported.")
         return enc, tokens_per_message, tokens_per_name
 
     def _get_prompt_tokens_from_messages(self, messages, enc, tokens_per_message, tokens_per_name):
```
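
The constants chosen by `_get_encoding_for_chat_api` (`tokens_per_message = 3`, `tokens_per_name = 1` for the gpt-3.5-turbo / gpt-4 family) follow OpenAI's published token-counting recipe. Below is a cookbook-style sketch of how prompt tokens are then derived from chat messages; promptflow's own `_get_prompt_tokens_from_messages` may differ in detail:

```python
import tiktoken


def count_prompt_tokens(messages: list, model: str = "gpt-4") -> int:
    enc = tiktoken.encoding_for_model(model)
    tokens_per_message, tokens_per_name = 3, 1  # gpt-3.5-turbo / gpt-4 family
    total = 0
    for message in messages:
        total += tokens_per_message  # per-message framing tokens
        for key, value in message.items():
            total += len(enc.encode(value))
            if key == "name":
                total += tokens_per_name
    return total + 3  # every reply is primed with <|start|>assistant<|message|>


print(count_prompt_tokens([{"role": "user", "content": "Hello"}]))
```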
```diff
@@ -172,25 +180,28 @@ def _get_completion_tokens_for_chat_api(self, output, enc):
 
     def get_openai_metrics_for_completion_api(self, inputs, output):
         metrics = {}
-        enc = self._get_encoding_for_completion_api(self._try_get_model(inputs, output))
-        metrics["prompt_tokens"] = 0
-        prompt = inputs.get("prompt")
-        if isinstance(prompt, str):
-            metrics["prompt_tokens"] = len(enc.encode(prompt))
-        elif isinstance(prompt, list):
-            for pro in prompt:
-                metrics["prompt_tokens"] += len(enc.encode(pro))
-        if isinstance(output, list):
-            if IS_LEGACY_OPENAI:
-                metrics["completion_tokens"] = len(output)
-            else:
-                metrics["completion_tokens"] = len(
-                    [chunk for chunk in output if chunk.choices and chunk.choices[0].text]
-                )
-        else:
-            metrics["completion_tokens"] = self._get_completion_tokens_for_completion_api(output, enc)
-        metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
-        return metrics
+        try:
+            enc = self._get_encoding_for_completion_api(self._try_get_model(inputs, output))
+            metrics["prompt_tokens"] = 0
+            prompt = inputs.get("prompt")
+            if isinstance(prompt, str):
+                metrics["prompt_tokens"] = len(enc.encode(prompt))
+            elif isinstance(prompt, list):
+                for pro in prompt:
+                    metrics["prompt_tokens"] += len(enc.encode(pro))
+            if isinstance(output, list):
+                if IS_LEGACY_OPENAI:
+                    metrics["completion_tokens"] = len(output)
+                else:
+                    metrics["completion_tokens"] = len(
+                        [chunk for chunk in output if chunk.choices and chunk.choices[0].text]
+                    )
+            else:
+                metrics["completion_tokens"] = self._get_completion_tokens_for_completion_api(output, enc)
+            metrics["total_tokens"] = metrics["prompt_tokens"] + metrics["completion_tokens"]
+            return metrics
+        except Exception as ex:
+            self._log_warning(f"Failed to calculate metrics due to exception: {ex}.")
 
     def _get_encoding_for_completion_api(self, model):
         try:
```
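
Net effect for callers, sketched below under the assumption that the constructor and method signatures are exactly as this diff shows them: a bad input now yields a logged warning and a `None` result instead of an exception, so tracing code needs no defensive try/except around the calculator.

```python
from promptflow.tracing._openai_utils import OpenAIMetricsCalculator

calculator = OpenAIMetricsCalculator()
# No "model" (or legacy "engine") key in inputs: _try_get_model raises internally,
# and the public method catches it and logs a warning.
metrics = calculator.get_openai_metrics_for_chat_api(
    inputs={"messages": [{"role": "user", "content": "hi"}]},
    output=[],
)
print(metrics)  # None
```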
