[BugFix][Metrics] Fix Prometheus Multiprocess Metrics Issues and Add ZMQ Communication Metrics (#5185)

* [Feature] add metrics for ZMQ and fix multiprocess metrics

* fix test_metrics.py

---------

Co-authored-by: Jiaxin Sui <95567040+plusNew001@users.noreply.github.com>
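The commit message mentions adding metrics for ZMQ communication; those changes are not part of the hunks excerpted below. As a hedged illustration of the general pattern only, the following sketch times a ZMQ request/reply round trip with a prometheus_client Histogram. The metric name, the inproc endpoint, and the inline echo "server" are placeholders, not FastDeploy code.

```python
# Illustrative sketch only: timing a ZMQ round trip with a Prometheus histogram.
# The metric name and endpoint are hypothetical, not taken from FastDeploy.
import time

import zmq
from prometheus_client import Histogram

zmq_request_latency = Histogram(
    "zmq_request_roundtrip_seconds",          # hypothetical metric name
    "Round-trip latency of a ZMQ request/reply exchange in seconds",
)

ctx = zmq.Context.instance()
rep = ctx.socket(zmq.REP)
rep.bind("inproc://metrics-demo")             # in-process endpoint keeps the demo self-contained
req = ctx.socket(zmq.REQ)
req.connect("inproc://metrics-demo")

start = time.time()
req.send(b"ping")
rep.send(rep.recv())                          # echo the request back as the "server"
assert req.recv() == b"ping"
zmq_request_latency.observe(time.time() - start)
```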
Author: fl0w2o48
Date: 2025-11-27 15:05:09 +08:00
Committed by: GitHub
Parent: ce9a49f6bf
Commit: e63d715fc3
17 changed files with 878 additions and 280 deletions
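For context on the "multiprocess metrics" part of the fix: in prometheus_client's multiprocess mode, each worker process writes its samples to files in a shared directory, and a single collector merges those files when /metrics is scraped; the hunks below switch the e2e_request_latency observation from work_process_metrics to main_process_metrics, presumably so that it lands in the registry the server actually exports. The sketch below is a minimal, self-contained illustration of that mechanism; the directory and metric name are placeholders, not FastDeploy's actual configuration.

```python
# Minimal sketch of prometheus_client multiprocess mode (placeholder names/paths).
import os

# The environment variable must be set before prometheus_client is imported,
# otherwise the file-backed multiprocess value class is not selected.
os.environ.setdefault("PROMETHEUS_MULTIPROC_DIR", "/tmp/prom_multiproc_demo")
os.makedirs(os.environ["PROMETHEUS_MULTIPROC_DIR"], exist_ok=True)

from prometheus_client import CollectorRegistry, Histogram, generate_latest, multiprocess

# In a worker process: the observation is persisted to the shared directory.
e2e_request_latency = Histogram(
    "e2e_request_latency_seconds",            # hypothetical metric name
    "End-to-end request latency in seconds",
)
e2e_request_latency.observe(0.42)

# In the process that serves /metrics: aggregate every worker's files.
registry = CollectorRegistry()
multiprocess.MultiProcessCollector(registry)
print(generate_latest(registry).decode())
```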
@@ -39,7 +39,7 @@ from fastdeploy.entrypoints.openai.protocol import (
     UsageInfo,
 )
 from fastdeploy.entrypoints.openai.response_processors import ChatResponseProcessor
-from fastdeploy.metrics.work_metrics import work_process_metrics
+from fastdeploy.metrics.metrics import main_process_metrics
 from fastdeploy.trace.constants import LoggingEventName
 from fastdeploy.trace.trace_logger import print as trace_print
 from fastdeploy.utils import (
@@ -382,7 +382,7 @@ class OpenAIServingChat:
                     )
                 if res["finished"]:
                     num_choices -= 1
-                    work_process_metrics.e2e_request_latency.observe(
+                    main_process_metrics.e2e_request_latency.observe(
                         time.time() - res["metrics"]["request_start_time"]
                     )
                     if previous_num_tokens[idx] != max_tokens:
@@ -631,7 +631,7 @@ class OpenAIServingChat:
             output = data["outputs"]
             if output is not None and output.get("metrics") and output["metrics"].get("request_start_time"):
-                work_process_metrics.e2e_request_latency.observe(
+                main_process_metrics.e2e_request_latency.observe(
                     time.time() - data.get("metrics").get("request_start_time")
                 )
             message = ChatMessage(
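The two hunks above only change which metrics object records the end-to-end latency. As a sketch of the pattern being relied on, a module-level holder exposing a prometheus_client Histogram could look like the following; the class layout, metric name, and bucket boundaries are illustrative assumptions, not the contents of fastdeploy/metrics/metrics.py.

```python
# Illustrative sketch of a module-level metrics holder in the style of
# main_process_metrics; names and bucket boundaries are assumptions.
import time

from prometheus_client import Histogram


class MainProcessMetrics:
    def __init__(self) -> None:
        self.e2e_request_latency = Histogram(
            "e2e_request_latency_seconds",    # hypothetical metric name
            "End-to-end request latency in seconds",
            buckets=(0.1, 0.5, 1.0, 2.5, 5.0, 10.0, 30.0, 60.0),
        )


main_process_metrics = MainProcessMetrics()

# Usage mirroring the diff: observe latency once a request has finished.
request_start_time = time.time()
# ... handle the request ...
main_process_metrics.e2e_request_latency.observe(time.time() - request_start_time)
```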