polish code with new pre-commit rule (#2923)

Author: Zero Rains
Committed by: GitHub
Date: 2025-07-19 23:19:27 +08:00
Parent: b8676d71a8
Commit: 25698d56d1
424 changed files with 14307 additions and 13518 deletions
+239 -268
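The hunks below are largely mechanical reformatting from the new pre-commit hooks: imports wrapped one entry per line with trailing commas, string literals switched to double quotes, "{}".format(...) calls rewritten as f-strings, and long call sites re-broken around a roughly 120-character line limit. The hook set itself is not shown in this file's diff; the .pre-commit-config.yaml below is only a sketch of a configuration that would produce this style (black at line length 120 plus isort and ruff are assumptions here, not the repository's verified setup):

# Hypothetical sketch only; the hooks actually adopted in #2923 are not shown in this diff.
repos:
  - repo: https://github.com/psf/black
    rev: 24.4.2
    hooks:
      - id: black
        args: ["--line-length=120"]
  - repo: https://github.com/pycqa/isort
    rev: 5.13.2
    hooks:
      - id: isort
        args: ["--profile=black", "--line-length=120"]
  - repo: https://github.com/astral-sh/ruff-pre-commit
    rev: v0.4.4
    hooks:
      - id: ruff
        args: ["--fix"]

With hooks like these installed, pre-commit run --all-files reformats the whole tree in one pass, which is consistent with the large but mechanical footprint of this commit (424 files changed).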
@@ -13,6 +13,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""
from __future__ import annotations
import copy
@@ -40,18 +41,21 @@ from fastdeploy.engine.expert_service import start_expert_service
from fastdeploy.engine.request import Request, RequestOutput
from fastdeploy.engine.resource_manager import ResourceManager
from fastdeploy.input.preprocess import InputPreprocessor
from fastdeploy.inter_communicator import (EngineCacheQueue, EngineWorkerQueue,
IPCSignal, ZmqClient)
from fastdeploy.inter_communicator import (
EngineCacheQueue,
EngineWorkerQueue,
IPCSignal,
ZmqClient,
)
from fastdeploy.metrics.metrics import main_process_metrics
from fastdeploy.metrics.trace_util import start_span, start_span_request
from fastdeploy.model_executor.guided_decoding import schema_checker
from fastdeploy.output.token_processor import (TokenProcessor,
WarmUpTokenProcessor)
from fastdeploy.output.token_processor import TokenProcessor, WarmUpTokenProcessor
from fastdeploy.splitwise.splitwise_connector import SplitwiseConnector
from fastdeploy.utils import EngineError, console_logger, llm_logger
class LLMEngine(object):
class LLMEngine:
"""
Engine class responsible for managing the Large Language Model (LLM) operations.
@@ -94,30 +98,28 @@ class LLMEngine(object):
self.running = True
self.scheduler = cfg.scheduler_config.scheduler()
self.input_processor = InputPreprocessor(cfg.tokenizer,
cfg.reasoning_parser,
cfg.limit_mm_per_prompt,
cfg.mm_processor_kwargs,
cfg.enable_mm)
self.input_processor = InputPreprocessor(
cfg.tokenizer,
cfg.reasoning_parser,
cfg.limit_mm_per_prompt,
cfg.mm_processor_kwargs,
cfg.enable_mm,
)
self.start_queue_service()
self.resource_manager = ResourceManager(cfg.max_num_seqs, cfg,
cfg.tensor_parallel_size,
cfg.splitwise_role)
self.resource_manager = ResourceManager(cfg.max_num_seqs, cfg, cfg.tensor_parallel_size, cfg.splitwise_role)
os.environ['INFERENCE_MSG_QUEUE_ID'] = str(
self.cfg.engine_worker_queue_port)
os.environ["INFERENCE_MSG_QUEUE_ID"] = str(self.cfg.engine_worker_queue_port)
self.split_connector = SplitwiseConnector(cfg, self.scheduler,
self.engine_worker_queue,
self.resource_manager)
self.split_connector = SplitwiseConnector(cfg, self.scheduler, self.engine_worker_queue, self.resource_manager)
self.token_processor = TokenProcessor(
cfg=self.cfg,
cached_generated_tokens=self.scheduler,
engine_worker_queue=self.engine_worker_queue,
split_connector=self.split_connector)
split_connector=self.split_connector,
)
self.token_processor.set_resource_manager(self.resource_manager)
self.is_started = False
@@ -129,11 +131,13 @@ class LLMEngine(object):
else:
self.do_profile = 0
self.partial_chunked_tokens = [0] * (
self.cfg.max_num_partial_prefills + 1)
self.partial_chunked_tokens = [0] * (self.cfg.max_num_partial_prefills + 1)
for idx in range(1, self.cfg.max_num_partial_prefills + 1):
self.partial_chunked_tokens[idx] = (self.cfg.max_num_batched_tokens // idx) \
// self.cfg.cache_config.block_size * self.cfg.cache_config.block_size
self.partial_chunked_tokens[idx] = (
(self.cfg.max_num_batched_tokens // idx)
// self.cfg.cache_config.block_size
* self.cfg.cache_config.block_size
)
self.partial_chunked_tokens[idx] = max(1, self.partial_chunked_tokens[idx])
self._finalizer = weakref.finalize(self, self._exit_sub_services)
@@ -168,8 +172,8 @@ class LLMEngine(object):
time.sleep(3)
if self.do_profile == 0 and (
self.cfg.cache_config.enable_prefix_caching \
or self.cfg.splitwise_role != "mixed"):
self.cfg.cache_config.enable_prefix_caching or self.cfg.splitwise_role != "mixed"
):
device_ids = self.cfg.device_ids.split(",")
self.cache_manager_processes = self.resource_manager.cache_manager.launch_cache_manager(
cache_config=self.cfg.cache_config,
@@ -177,16 +181,15 @@ class LLMEngine(object):
device_ids=device_ids,
pod_ip=self.cfg.master_ip,
engine_worker_queue_port=self.cfg.engine_worker_queue_port,
pid_suffix=self.ipc_signal_suffix)
pid_suffix=self.ipc_signal_suffix,
)
self.worker_proc = self._start_worker_service()
console_logger.info("Waitting worker processes ready...")
time.sleep(5)
self.worker_init_status = dict()
if not self.check_worker_initialize_status():
console_logger.error(
"Failed to launch worker processes, check log/workerlog.* for more details."
)
console_logger.error("Failed to launch worker processes, check log/workerlog.* for more details.")
return False
# Start warmup if enabled
@@ -199,17 +202,16 @@ class LLMEngine(object):
self.token_processor.tasks_queue = self.engine_worker_queue
self.insert_task_to_worker_thread = threading.Thread(
target=self._insert_task_to_worker, daemon=True)
self.insert_task_to_worker_thread = threading.Thread(target=self._insert_task_to_worker, daemon=True)
self.insert_task_to_worker_thread.start()
if self.api_server_pid is not None:
self.insert_task_to_scheduler_thread = threading.Thread(
target=self._insert_zmq_task_to_scheduler, daemon=True)
target=self._insert_zmq_task_to_scheduler, daemon=True
)
self.insert_task_to_scheduler_thread.start()
self.receive_output_thread = threading.Thread(
target=self._zmq_send_generated_tokens, daemon=True)
self.receive_output_thread = threading.Thread(target=self._zmq_send_generated_tokens, daemon=True)
self.receive_output_thread.start()
# Start TokenProcessor thread
@@ -223,8 +225,7 @@ class LLMEngine(object):
self.engine_worker_queue.available_prefill_instances.put(1)
self.split_mode_get_tasks()
if self.cfg.scheduler_config.name == "splitwise":
self.splitwise_receive_thread = threading.Thread(
target=self.split_connector.start_receiver, args=())
self.splitwise_receive_thread = threading.Thread(target=self.split_connector.start_receiver, args=())
self.splitwise_receive_thread.daemon = True
self.splitwise_receive_thread.start()
@@ -240,20 +241,28 @@ class LLMEngine(object):
if self.cfg.parallel_config.enable_expert_parallel and self.cfg.parallel_config.data_parallel_size > 1:
self.dp_processed = []
for i in range(1, self.cfg.parallel_config.data_parallel_size // self.cfg.nnode):
for i in range(
1,
self.cfg.parallel_config.data_parallel_size // self.cfg.nnode,
):
time.sleep(1)
self.dp_processed.append(
multiprocessing.Process(target=start_expert_service,
args=(self.cfg,
i + self.cfg.node_rank * self.cfg.worker_num_per_node,
self.ipc_signal_suffix)))
llm_logger.info(f"Engine is initialized successfully with {self.cfg.tensor_parallel_size}" \
+ " data parallel id {}".format(i))
multiprocessing.Process(
target=start_expert_service,
args=(
self.cfg,
i + self.cfg.node_rank * self.cfg.worker_num_per_node,
self.ipc_signal_suffix,
),
)
)
llm_logger.info(
f"Engine is initialized successfully with {self.cfg.tensor_parallel_size}"
+ f" data parallel id {i}"
)
self.dp_processed[-1].start()
console_logger.info(
"Worker processes are launched with {} seconds.".format(
time.time() - start_time))
console_logger.info(f"Worker processes are launched with {time.time() - start_time} seconds.")
return True
def _zmq_send_generated_tokens(self):
@@ -271,8 +280,7 @@ class LLMEngine(object):
self.zmq_server.send_multipart(request_id, contents)
except Exception as e:
llm_logger.error("Unexcepted error happend: {}, {}".format(
e, str(traceback.format_exc())))
llm_logger.error(f"Unexcepted error happend: {e}, {traceback.format_exc()!s}")
def _get_generated_result(self):
"""
@@ -296,8 +304,7 @@ class LLMEngine(object):
time.sleep(0.001)
continue
if self.exist_prefill_task_signal.value[0] > 0:
if self.cfg.splitwise_role == "mixed" or \
self.split_connector.has_splitwise_tasks():
if self.cfg.splitwise_role == "mixed" or self.split_connector.has_splitwise_tasks():
time.sleep(0.005)
continue
if self.engine_worker_queue.num_cache_infos() > 0:
@@ -309,17 +316,17 @@ class LLMEngine(object):
num_prefill_batch = min(
int(self.resource_manager.available_batch()),
self.cfg.max_prefill_batch)
self.cfg.max_prefill_batch,
)
self.resource_manager.check_and_free_block_tables()
tasks = self.scheduler.get_requests(
available_blocks=self.resource_manager.available_block_num(
),
available_blocks=self.resource_manager.available_block_num(),
block_size=self.cfg.cache_config.block_size,
reserved_output_blocks=self.cfg.cache_config.
enc_dec_block_num,
reserved_output_blocks=self.cfg.cache_config.enc_dec_block_num,
max_num_batched_tokens=self.cfg.max_num_batched_tokens,
batch=num_prefill_batch)
batch=num_prefill_batch,
)
if len(tasks) == 0:
time.sleep(0.001)
@@ -328,16 +335,14 @@ class LLMEngine(object):
current_id = (current_id + 1) % 100003
if self.cfg.splitwise_role != "mixed":
llm_logger.info("Inserting splitwise tasks")
self.split_connector.send_splitwise_tasks(
tasks, current_id)
self.split_connector.send_splitwise_tasks(tasks, current_id)
self.insert_tasks(tasks, current_id)
main_process_metrics.num_requests_waiting.dec(len(tasks))
main_process_metrics.num_requests_running.inc(len(tasks))
except Exception as e:
err_msg = "Error happend while insert task to engine: {}, {}.".format(
e, str(traceback.format_exc()))
err_msg = f"Error happend while insert task to engine: {e}, {traceback.format_exc()!s}."
llm_logger.error(err_msg)
def _insert_zmq_task_to_scheduler(self):
@@ -353,8 +358,7 @@ class LLMEngine(object):
else:
err, data = self.zmq_server.receive_pyobj_once(block)
if err is not None:
llm_logger.error(
"Engine stops inserting zmq task into scheduler")
llm_logger.error("Engine stops inserting zmq task into scheduler")
break
request, insert_task = None, []
@@ -363,13 +367,11 @@ class LLMEngine(object):
request = Request.from_dict(data)
start_span("ENQUEUE_ZMQ", data, trace.SpanKind.PRODUCER)
llm_logger.debug(f"Receive request: {request}")
err_msg = None
if self.guided_decoding_checker is not None:
request, err_msg = self.guided_decoding_checker.schema_format(
request)
request, err_msg = self.guided_decoding_checker.schema_format(request)
if err_msg is not None:
llm_logger.error(err_msg)
@@ -394,17 +396,20 @@ class LLMEngine(object):
main_process_metrics.num_requests_waiting.inc(1)
continue
error_result = RequestOutput(request_id=request_id,
finished=True,
error_code=500,
error_msg=failed)
error_result = RequestOutput(
request_id=request_id,
finished=True,
error_code=500,
error_msg=failed,
)
# Since the request is not in scheduler
# Send result by zmq directly
self.zmq_server.send_multipart(request_id, error_result)
except Exception as e:
llm_logger.error(
f"Error happend while receving new request from zmq, details={e}, "
f"traceback={traceback.format_exc()}")
f"traceback={traceback.format_exc()}"
)
def add_requests(self, task, sampling_params=None, **kwargs):
"""
@@ -428,23 +433,25 @@ class LLMEngine(object):
enable_thinking = None
if kwargs is not None:
enable_thinking = kwargs.get("enable_thinking", None)
request = self.data_processor.process_request(
request, self.cfg.max_model_len, enable_thinking=enable_thinking)
request = self.data_processor.process_request(request, self.cfg.max_model_len, enable_thinking=enable_thinking)
request.prompt_token_ids_len = len(request.prompt_token_ids)
input_ids_len = request.prompt_token_ids_len
request.set(
"max_tokens",
min(self.cfg.max_model_len - input_ids_len,
request.get("max_tokens")))
min(
self.cfg.max_model_len - input_ids_len,
request.get("max_tokens"),
),
)
if request.get("reasoning_max_tokens") is None:
default_reasoning_max_tokens = max(
int(request.get("max_tokens") * 0.8), 1)
default_reasoning_max_tokens = max(int(request.get("max_tokens") * 0.8), 1)
request.set("reasoning_max_tokens", default_reasoning_max_tokens)
min_tokens = request.get("min_tokens")
if input_ids_len + min_tokens >= self.cfg.max_model_len:
error_msg = (
f"Input text is too long, length of prompt token({input_ids_len}) "
f"+ min_dec_len ({min_tokens}) >= max_model_len ")
f"+ min_dec_len ({min_tokens}) >= max_model_len "
)
llm_logger.error(error_msg)
raise EngineError(error_msg, error_code=400)
@@ -456,16 +463,14 @@ class LLMEngine(object):
raise EngineError(error_msg, error_code=400)
if self.guided_decoding_checker is not None:
request, err_msg = self.guided_decoding_checker.schema_format(
request)
request, err_msg = self.guided_decoding_checker.schema_format(request)
if err_msg is not None:
llm_logger.error(err_msg)
raise EngineError(err_msg, error_code=400)
request.preprocess_end_time = time.time()
self.scheduler.put_requests([request])
llm_logger.info(
f"Cache task with request_id ({request.get('request_id')})")
llm_logger.info(f"Cache task with request_id ({request.get('request_id')})")
llm_logger.debug(f"cache task: {request}")
def warmup(self):
@@ -486,25 +491,19 @@ class LLMEngine(object):
processed_indices = []
for idx, task in enumerate(self.waiting_requests):
if self.resource_manager.is_resource_sufficient(
task.prompt_token_ids_len):
if self.resource_manager.is_resource_sufficient(task.prompt_token_ids_len):
self.insert_tasks([task])
llm_logger.info(
f"Resource available, processing task {task.request_id}"
)
llm_logger.info(f"Resource available, processing task {task.request_id}")
processed_indices.append(idx)
else:
llm_logger.debug(
f"Still waiting for resources {task.request_id}"
)
llm_logger.debug(f"Still waiting for resources {task.request_id}")
break
for idx in sorted(processed_indices, reverse=True):
self.waiting_requests.pop(idx)
if not self.engine_worker_queue.disaggregate_queue_empty():
items = self.engine_worker_queue.get_disaggregated_tasks(
)
items = self.engine_worker_queue.get_disaggregated_tasks()
for item in items:
role = item[0]
tasks = item[1]
@@ -515,7 +514,7 @@ class LLMEngine(object):
self.insert_tasks(tasks)
elif role == "decode":
if hasattr(tasks[0], 'finished'):
if hasattr(tasks[0], "finished"):
if not isinstance(tasks, list):
tasks = [tasks]
for task in tasks:
@@ -527,25 +526,19 @@ class LLMEngine(object):
else:
if len(self.waiting_requests):
llm_logger.info(
f"Waiting for resource for task {tasks[0].request_id}"
)
llm_logger.info(f"Waiting for resource for task {tasks[0].request_id}")
self.waiting_requests.extend(tasks)
else:
new_waiting = []
for task in tasks:
if self.resource_manager.is_resource_sufficient(
task.prompt_token_ids_len):
if self.resource_manager.is_resource_sufficient(task.prompt_token_ids_len):
self.insert_tasks([task])
else:
new_waiting.append(task)
if new_waiting:
self.waiting_requests.extend(
new_waiting)
llm_logger.info(
f"Added {len(new_waiting)} tasks to waiting queue"
)
self.waiting_requests.extend(new_waiting)
llm_logger.info(f"Added {len(new_waiting)} tasks to waiting queue")
else:
time.sleep(0.001)
@@ -572,13 +565,10 @@ class LLMEngine(object):
if current_request_size[idx] <= 0:
chunk_request_num -= 1
if not self.cfg.cache_config.enable_chunked_prefill or len(
requests) == 0:
if not self.cfg.cache_config.enable_chunked_prefill or len(requests) == 0:
return
current_request_size = [
request.prompt_token_ids_len for request in requests
]
current_request_size = [request.prompt_token_ids_len for request in requests]
requests_chunk = [[] for _ in range(len(requests))]
chunk_request_num = len(current_request_size)
while chunk_request_num >= 1:
@@ -588,25 +578,25 @@ class LLMEngine(object):
continue
chunk_size = min(
current_request_size[idx],
self.partial_chunked_tokens[chunk_request_num])
self.partial_chunked_tokens[chunk_request_num],
)
update_tokens(idx, chunk_size)
while remain_batched_tokens >= self.cfg.cache_config.block_size:
# When max_num_batched_tokens still has spare budget, allocate it to the shorter requests first
waiting_requests = [
input_lens for input_lens in current_request_size
if input_lens > 0
]
waiting_requests = [input_lens for input_lens in current_request_size if input_lens > 0]
if len(waiting_requests) == 0:
break
available_tokens = remain_batched_tokens // self.cfg.cache_config.block_size * \
self.cfg.cache_config.block_size
available_tokens = (
remain_batched_tokens // self.cfg.cache_config.block_size * self.cfg.cache_config.block_size
)
append_idx = current_request_size.index(min(waiting_requests))
chunk_size = min(
current_request_size[append_idx],
self.partial_chunked_tokens[chunk_request_num],
available_tokens)
available_tokens,
)
update_tokens(append_idx, chunk_size, update_chunk=True)
for idx in range(len(requests)):
@@ -616,8 +606,7 @@ class LLMEngine(object):
"""
update each multimodal request's chunk size info
"""
if not self.cfg.cache_config.enable_chunked_prefill or len(
requests) == 0:
if not self.cfg.cache_config.enable_chunked_prefill or len(requests) == 0:
return
for request in requests:
@@ -628,12 +617,9 @@ class LLMEngine(object):
inputs["grid_thw"] = np.array([], dtype="int64")
inputs["images"] = np.array([], dtype="uint8")
input_ids = paddle.to_tensor(inputs["input_ids"], dtype="int64")
image_type_ids = paddle.to_tensor(inputs["image_type_ids"],
dtype="int32")
image_type_ids = paddle.to_tensor(inputs["image_type_ids"], dtype="int32")
image_mask = input_ids == self.data_processor.image_patch_id
image_token_sum = paddle.full(shape=[len(input_ids) + 1],
fill_value=0,
dtype="int32")
image_token_sum = paddle.full(shape=[len(input_ids) + 1], fill_value=0, dtype="int32")
image_token_sum[1:] = paddle.cumsum(image_mask.cast("int32"))
grid_thw = []
for one in inputs["grid_thw"]:
@@ -644,45 +630,46 @@ class LLMEngine(object):
grid_thw = paddle.to_tensor(grid_thw, dtype="int64")
from fastdeploy.model_executor.ops.gpu import get_mm_split_fuse
chunk_image_num, chunk_seq_len = get_mm_split_fuse(
input_ids, image_type_ids, image_token_sum, grid_thw,
self.data_processor.image_patch_id, len(grid_thw), 0,
len(input_ids), 0, self.partial_chunked_tokens[1], 2048)
input_ids,
image_type_ids,
image_token_sum,
grid_thw,
self.data_processor.image_patch_id,
len(grid_thw),
0,
len(input_ids),
0,
self.partial_chunked_tokens[1],
2048,
)
grid_thw = grid_thw.numpy().reshape([-1, 3])
num_chunks = len(chunk_image_num)
chunks_info = []
input_ids_st, image_type_ids_st, grid_thw_st, patch_st = 0, 0, 0, 0
for idx in range(num_chunks):
chunk_input_ids = inputs["input_ids"][
input_ids_st:input_ids_st + chunk_seq_len[idx]]
chunk_token_type_ids = inputs["token_type_ids"][
input_ids_st:input_ids_st + chunk_seq_len[idx]]
actual_image_num = np.sum(grid_thw[grid_thw_st:grid_thw_st +
chunk_image_num[idx], 0])
chunk_input_ids = inputs["input_ids"][input_ids_st : input_ids_st + chunk_seq_len[idx]]
chunk_token_type_ids = inputs["token_type_ids"][input_ids_st : input_ids_st + chunk_seq_len[idx]]
actual_image_num = np.sum(grid_thw[grid_thw_st : grid_thw_st + chunk_image_num[idx], 0])
chunk_image_type_ids = inputs["image_type_ids"][
image_type_ids_st:image_type_ids_st + actual_image_num]
chunk_grid_thw = grid_thw[grid_thw_st:grid_thw_st +
chunk_image_num[idx]]
image_type_ids_st : image_type_ids_st + actual_image_num
]
chunk_grid_thw = grid_thw[grid_thw_st : grid_thw_st + chunk_image_num[idx]]
chunk_patch_num = np.sum(np.prod(chunk_grid_thw, axis=1))
chunk_images = inputs["images"][patch_st:patch_st +
chunk_patch_num]
chunk_images = inputs["images"][patch_st : patch_st + chunk_patch_num]
chunks_info.append({
"input_ids":
chunk_input_ids,
"token_type_ids":
chunk_token_type_ids,
"image_type_ids":
chunk_image_type_ids
if chunk_image_type_ids.shape[0] else None,
"grid_thw":
chunk_grid_thw if chunk_grid_thw.shape[0] else None,
"images":
chunk_images if chunk_images.shape[0] else None,
"position_ids":
None
})
chunks_info.append(
{
"input_ids": chunk_input_ids,
"token_type_ids": chunk_token_type_ids,
"image_type_ids": (chunk_image_type_ids if chunk_image_type_ids.shape[0] else None),
"grid_thw": (chunk_grid_thw if chunk_grid_thw.shape[0] else None),
"images": (chunk_images if chunk_images.shape[0] else None),
"position_ids": None,
}
)
input_ids_st += chunk_seq_len[idx]
image_type_ids_st += actual_image_num
@@ -704,18 +691,14 @@ class LLMEngine(object):
del self.resource_manager.req_dict[task.request_id]
cur_task = self.resource_manager.tasks_list[cur_task_idx]
cur_task.prompt_token_ids[0] = task.outputs.token_ids[0]
if self.cfg.speculative_config.method in [
"mtp"
] and self.cfg.splitwise_role == "decode":
cur_task.draft_token_ids = copy.deepcopy(
task.outputs.draft_token_ids)
if self.cfg.speculative_config.method in ["mtp"] and self.cfg.splitwise_role == "decode":
cur_task.draft_token_ids = copy.deepcopy(task.outputs.draft_token_ids)
if task.error_code != 200:
self.resource_manager.stop_flags[cur_task_idx] = True
self.resource_manager.tasks_list[cur_task_idx] = None
self.resource_manager._recycle_block_tables(cur_task)
if task.request_id in self.token_processor.tokens_counter:
del self.token_processor.tokens_counter[
task.request_id]
del self.token_processor.tokens_counter[task.request_id]
self.scheduler.put_results([task])
llm_logger.warning(
f"{task.request_id} prefill failed with msg:{task.error_msg}, recycle resource."
@@ -723,8 +706,7 @@ class LLMEngine(object):
continue
self.token_processor.tokens_counter[task.request_id] = 1
current_tasks.append(cur_task)
self.engine_worker_queue.put_tasks(
(current_tasks, self.resource_manager.real_bsz))
self.engine_worker_queue.put_tasks((current_tasks, self.resource_manager.real_bsz))
return True
self.resource_manager.check_and_free_block_tables()
@@ -737,9 +719,7 @@ class LLMEngine(object):
available_batch = np.sum(self.resource_manager.stop_flags)
if len(tasks) > available_batch:
llm_logger.error(
"Inserting batch:{} exceeds the available batch:{}.".format(
len(tasks), available_batch))
llm_logger.error(f"Inserting batch:{len(tasks)} exceeds the available batch:{available_batch}.")
llm_logger.error("The exceeded part will be ignored!")
tasks = tasks[:available_batch]
@@ -763,8 +743,7 @@ class LLMEngine(object):
is_decode = True
else:
is_prefill = True
self.token_processor.number_of_input_tokens += tasks[
i].prompt_token_ids_len
self.token_processor.number_of_input_tokens += tasks[i].prompt_token_ids_len
self.split_connector.send_cache_infos(tasks, current_id)
if not is_decode:
@@ -776,8 +755,7 @@ class LLMEngine(object):
self.update_requests_chunk_size(tasks)
else:
self.update_mm_requests_chunk_size(tasks)
self.engine_worker_queue.put_tasks(
(tasks, self.resource_manager.real_bsz))
self.engine_worker_queue.put_tasks((tasks, self.resource_manager.real_bsz))
if is_prefill and self.cfg.scheduler_config.name != "splitwise":
self.engine_worker_queue.available_prefill_instances.put(1)
return True
@@ -793,8 +771,7 @@ class LLMEngine(object):
"""
judge if all tasks are finished
"""
return np.sum(self.resource_manager.stop_flags) == len(
self.resource_manager.stop_flags)
return np.sum(self.resource_manager.stop_flags) == len(self.resource_manager.stop_flags)
def _set_warmup_token_processor(self):
"""
@@ -824,8 +801,7 @@ class LLMEngine(object):
judge if all worker processes are ready
"""
if np.sum(self.worker_ready_signal.value
) == self.cfg.worker_num_per_node:
if np.sum(self.worker_ready_signal.value) == self.cfg.worker_num_per_node:
return True
return False
@@ -835,30 +811,33 @@ class LLMEngine(object):
"""
# worker_ready_signal: one flag per worker process on this node, set once that worker is ready
worker_ready_signal_data = np.zeros(shape=[self.cfg.worker_num_per_node], dtype=np.int32)
self.worker_ready_signal = IPCSignal(name="worker_ready_signal",
array=worker_ready_signal_data,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
self.worker_ready_signal = IPCSignal(
name="worker_ready_signal",
array=worker_ready_signal_data,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True,
)
# exist_task_signal lets each worker process detect whether there are new tasks to handle
exist_task_signal_data = np.zeros(
[self.cfg.parallel_config.data_parallel_size], dtype=np.int32)
self.exist_task_signal = IPCSignal(name="exist_task_signal",
array=exist_task_signal_data,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
exist_task_signal_data = np.zeros([self.cfg.parallel_config.data_parallel_size], dtype=np.int32)
self.exist_task_signal = IPCSignal(
name="exist_task_signal",
array=exist_task_signal_data,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True,
)
# exist_swapped_task_signal lets the engine detect whether any worker holds swapped tasks
exist_swapped_task_signal_data = np.zeros(
[self.cfg.parallel_config.data_parallel_size], dtype=np.int32)
exist_swapped_task_signal_data = np.zeros([self.cfg.parallel_config.data_parallel_size], dtype=np.int32)
self.exist_swapped_task_signal = IPCSignal(
name="exist_swapped_task_signal",
array=exist_swapped_task_signal_data,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
create=True,
)
# exist_prefill_task_signal lets each worker process detect whether prefill should run
exist_prefill_task_signal_data = np.zeros([1], dtype=np.int32)
@@ -867,17 +846,18 @@ class LLMEngine(object):
array=exist_prefill_task_signal_data,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
create=True,
)
# worker_live_signal lets the engine detect whether each worker process is alive; it records the time of every step
worker_healthy_live_recorded_time_array = np.zeros(shape=[self.cfg.worker_num_per_node],
dtype=np.int32)
worker_healthy_live_recorded_time_array = np.zeros(shape=[self.cfg.worker_num_per_node], dtype=np.int32)
self.worker_healthy_live_signal = IPCSignal(
name="worker_healthy_live_signal",
array=worker_healthy_live_recorded_time_array,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
create=True,
)
if self.do_profile:
get_profile_block_num = np.zeros([self.cfg.worker_num_per_node], dtype=np.int32)
@@ -886,7 +866,8 @@ class LLMEngine(object):
array=get_profile_block_num,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
create=True,
)
model_weights_status = np.zeros([1], dtype=np.int32)
self.model_weights_status_signal = IPCSignal(
@@ -894,7 +875,8 @@ class LLMEngine(object):
array=model_weights_status,
dtype=np.int32,
suffix=self.ipc_signal_suffix,
create=True)
create=True,
)
def _exit_sub_services(self):
"""
@@ -903,8 +885,7 @@ class LLMEngine(object):
self.running = False
if hasattr(self, "cache_manager_processes"):
self.resource_manager.cache_manager.shm_cache_task_flag_broadcast.clear(
)
self.resource_manager.cache_manager.shm_cache_task_flag_broadcast.clear()
self.resource_manager.cache_manager.cache_ready_signal.clear()
for p in self.cache_manager_processes:
llm_logger.info(f"Killing cache manager process {p.pid}")
@@ -943,7 +924,7 @@ class LLMEngine(object):
"TRAINER_INSTANCES_NUM": 1,
"TRAINER_INSTANCES": "0.0.0.0",
"ENABLE_FASTDEPLOY_LOAD_MODEL_CONCURRENCY": 0,
"LOAD_STATE_DICT_THREAD_NUM": len(self.cfg.device_ids.split(',')),
"LOAD_STATE_DICT_THREAD_NUM": len(self.cfg.device_ids.split(",")),
"PROTOCOL_BUFFERS_PYTHON_IMPLEMENTATION": "python",
"FLAGS_use_append_attn": 1,
"NCCL_ALGO": "Ring",
@@ -951,24 +932,22 @@ class LLMEngine(object):
"FLAGS_hardamard_moe_block_size": 128,
}
# environment variables needed by Dy2St
variables.update({
"SOT_LOG_LEVEL":
os.getenv("SOT_LOG_LEVEL", default="0"),
"SOT_UNSAFE_CACHE_FASTPATH":
os.getenv("SOT_UNSAFE_CACHE_FASTPATH", default="1"),
"SOT_ENABLE_0_SIZE_FALLBACK":
os.getenv("SOT_ENABLE_0_SIZE_FALLBACK", default="0"),
"FLAGS_specialize_device_in_dy2st":
os.getenv("FLAGS_specialize_device_in_dy2st", default="1"),
"FLAGS_enable_async_fast_gc":
os.getenv("FLAGS_enable_async_fast_gc", default="0"),
"FLAGS_pir_interpreter_record_stream_for_gc_cache":
os.getenv("FLAGS_pir_interpreter_record_stream_for_gc_cache",
default="1"),
"FLAGS_parameters_persistent_mode_in_dy2st":
os.getenv("FLAGS_parameters_persistent_mode_in_dy2st",
default="1"),
})
variables.update(
{
"SOT_LOG_LEVEL": os.getenv("SOT_LOG_LEVEL", default="0"),
"SOT_UNSAFE_CACHE_FASTPATH": os.getenv("SOT_UNSAFE_CACHE_FASTPATH", default="1"),
"SOT_ENABLE_0_SIZE_FALLBACK": os.getenv("SOT_ENABLE_0_SIZE_FALLBACK", default="0"),
"FLAGS_specialize_device_in_dy2st": os.getenv("FLAGS_specialize_device_in_dy2st", default="1"),
"FLAGS_enable_async_fast_gc": os.getenv("FLAGS_enable_async_fast_gc", default="0"),
"FLAGS_pir_interpreter_record_stream_for_gc_cache": os.getenv(
"FLAGS_pir_interpreter_record_stream_for_gc_cache",
default="1",
),
"FLAGS_parameters_persistent_mode_in_dy2st": os.getenv(
"FLAGS_parameters_persistent_mode_in_dy2st", default="1"
),
}
)
if self.cfg.splitwise_role != "mixed":
variables["FLAGS_use_pd_disaggregation"] = 1
@@ -994,8 +973,7 @@ class LLMEngine(object):
current_file_path = os.path.abspath(__file__)
current_dir_path = os.path.split(current_file_path)[0]
# TODO
uncache_worker_stdout = "" if os.getenv("UNCACHE_WORKER_STDOUT",
"0") == 1 else "-u"
uncache_worker_stdout = "" if os.getenv("UNCACHE_WORKER_STDOUT", "0") == 1 else "-u"
pd_cmd = f"{command_prefix} {sys.executable} {uncache_worker_stdout} -m paddle.distributed.launch"
pd_cmd = pd_cmd + f" --log_dir {log_dir}"
@@ -1004,7 +982,7 @@ class LLMEngine(object):
ori_vocab_size = (
len(self.data_processor.tokenizer.sp_model)
if hasattr(self.data_processor.tokenizer, 'sp_model')
if hasattr(self.data_processor.tokenizer, "sp_model")
else len(self.data_processor.tokenizer.vocab)
)
@@ -1012,10 +990,10 @@ class LLMEngine(object):
f" --devices {self.cfg.device_ids} {py_script}"
f" --max_num_seqs {self.cfg.max_num_seqs} --max_model_len {self.cfg.max_model_len}"
f" --gpu_memory_utilization {self.cfg.cache_config.gpu_memory_utilization}"
f" --model_name_or_path {str(self.cfg.model_name_or_path)}"
f" --model_name_or_path {self.cfg.model_name_or_path!s}"
f" --device_ids {self.cfg.device_ids}"
f" --tensor_parallel_size {self.cfg.tensor_parallel_size}"
f" --engine_worker_queue_port {str(self.cfg.engine_worker_queue_port)}"
f" --engine_worker_queue_port {self.cfg.engine_worker_queue_port!s}"
f" --pod_ip {self.cfg.master_ip}"
f" --total_block_num {self.cfg.cache_config.total_block_num}"
f" --block_size {self.cfg.cache_config.block_size}"
@@ -1036,16 +1014,13 @@ class LLMEngine(object):
f" --speculative_benchmark_mode {self.cfg.speculative_config.benchmark_mode}"
f" --graph_optimization_config '{self.cfg.graph_optimization_config.to_json_string()}'"
f" --guided_decoding_backend {self.cfg.guided_decoding_backend}"
f" --load_strategy {self.cfg.model_config.load_strategy}")
f" --load_strategy {self.cfg.model_config.load_strategy}"
)
worker_append_flag = {
"enable_expert_parallel":
self.cfg.parallel_config.enable_expert_parallel,
"enable_prefix_caching":
self.cfg.cache_config.enable_prefix_caching,
"enable_chunked_prefill":
self.cfg.cache_config.enable_chunked_prefill,
"enable_expert_parallel": self.cfg.parallel_config.enable_expert_parallel,
"enable_prefix_caching": self.cfg.cache_config.enable_prefix_caching,
"enable_chunked_prefill": self.cfg.cache_config.enable_chunked_prefill,
"do_profile": self.do_profile,
"dynamic_load_weight": self.cfg.model_config.dynamic_load_weight,
"disable_any_whitespace": self.cfg.disable_any_whitespace,
@@ -1059,11 +1034,11 @@ class LLMEngine(object):
if self.cfg.nnode > 1:
pd_cmd = pd_cmd + (
f" --master {self.cfg.dist_init_addr}"
f" --nnodes {str(self.cfg.nnode)}"
f" --rank {str(self.cfg.node_rank)}"
f" --nnodes {self.cfg.nnode!s}"
f" --rank {self.cfg.node_rank!s}"
)
pd_cmd = pd_cmd + arguments + f" 2>{log_dir}/launch_worker.log"
llm_logger.info("Launch worker service command: {}".format(pd_cmd))
llm_logger.info(f"Launch worker service command: {pd_cmd}")
p = subprocess.Popen(
pd_cmd,
stdout=subprocess.PIPE,
@@ -1111,8 +1086,7 @@ class LLMEngine(object):
try:
req_id = self._format_and_add_data(prompts)
except Exception as e:
llm_logger.error(
f"Error happend while adding request, details={e}")
llm_logger.error(f"Error happend while adding request, details={e}")
raise EngineError(str(e), error_code=400)
# Fetch the result of the current request
@@ -1151,8 +1125,7 @@ class LLMEngine(object):
if num_gpu_blocks < 0:
num_gpu_blocks = self.get_profile_block_num_signal.value[i]
else:
num_gpu_blocks = min(
num_gpu_blocks, self.get_profile_block_num_signal.value[i])
num_gpu_blocks = min(num_gpu_blocks, self.get_profile_block_num_signal.value[i])
self.cfg.cache_config.reset(num_gpu_blocks)
self.resource_manager.reset_cache_config(self.cfg.cache_config)
@@ -1164,15 +1137,16 @@ class LLMEngine(object):
device_ids=device_ids,
pod_ip=self.cfg.master_ip,
engine_worker_queue_port=self.cfg.engine_worker_queue_port,
pid_suffix=self.ipc_signal_suffix)
pid_suffix=self.ipc_signal_suffix,
)
def check_health(self, time_interval_threashold=30):
"""
Check the health of the model server by checking whether all workers are alive.
"""
if self.worker_healthy_live_signal.value[0]:
elapsed_time = time.time() - \
self.worker_healthy_live_signal.value[0]
elapsed_time = time.time() - self.worker_healthy_live_signal.value[0]
if elapsed_time > time_interval_threashold:
return False, "Worker Service Not Healthy"
@@ -1185,38 +1159,31 @@ class LLMEngine(object):
def detect_thread():
for line in self.worker_proc.stdout:
line = line.decode('utf-8', errors='ignore')
line = line.decode("utf-8", errors="ignore")
if self.worker_init_status.get("finished", False):
break
if match := re.search(
r'Loading (?:fastsafetensors |safetensors )?checkpoint shards:\s*(\d+)',
line):
self.worker_init_status["weight_loadding"] = eval(
match.group(1)) * 1.0 / 100
elif (match := re.search(r'Start load layer (\d+)',
line)) or (match := re.search(
r'set state for layer (\d+)',
line)):
progress = eval(match.group(
1)) * 1.0 / self.cfg.model_config.num_layers
r"Loading (?:fastsafetensors |safetensors )?checkpoint shards:\s*(\d+)",
line,
):
self.worker_init_status["weight_loadding"] = eval(match.group(1)) * 1.0 / 100
elif (match := re.search(r"Start load layer (\d+)", line)) or (
match := re.search(r"set state for layer (\d+)", line)
):
progress = eval(match.group(1)) * 1.0 / self.cfg.model_config.num_layers
self.worker_init_status["layer_loadding"] = progress
if self.worker_init_status[
"layer_loadding"] == self.cfg.model_config.num_layers - 1:
if self.worker_init_status["layer_loadding"] == self.cfg.model_config.num_layers - 1:
self.worker_init_status["finished"] = True
self.checking_worker_status_thread = threading.Thread(
target=detect_thread, daemon=True)
self.checking_worker_status_thread = threading.Thread(target=detect_thread, daemon=True)
self.checking_worker_status_thread.start()
# display weight loading progress
with tqdm(total=100, desc="Loading Weights") as pbar:
progress = 0
while progress < 100:
progress = int(
self.worker_init_status.get("weight_loadding", 0) * 100)
if self.worker_init_status.get(
"layer_loadding",
0) > 0 or self._worker_processes_ready():
progress = int(self.worker_init_status.get("weight_loadding", 0) * 100)
if self.worker_init_status.get("layer_loadding", 0) > 0 or self._worker_processes_ready():
progress = 100
pbar.update(progress - pbar.n)
pbar.refresh()
@@ -1228,8 +1195,7 @@ class LLMEngine(object):
with tqdm(total=100, desc="Loading Layers") as pbar:
progress = 0
while progress < 100:
progress = int(
self.worker_init_status.get("layer_loadding", 0) * 100)
progress = int(self.worker_init_status.get("layer_loadding", 0) * 100)
if self._worker_processes_ready():
progress = 100
pbar.update(progress - pbar.n)
@@ -1256,19 +1222,21 @@ class LLMEngine(object):
address=address,
is_server=True,
num_client=self.cfg.tensor_parallel_size,
local_data_parallel_size=self.cfg.parallel_config.
data_parallel_size)
local_data_parallel_size=self.cfg.parallel_config.data_parallel_size,
)
if self.cfg.cache_config.enable_prefix_caching or self.cfg.splitwise_role != 'mixed':
if self.cfg.cache_config.enable_prefix_caching or self.cfg.splitwise_role != "mixed":
self.cache_task_queue = EngineCacheQueue(
address=(self.cfg.master_ip, self.cfg.cache_config.cache_queue_port),
authkey=b'cache_queue_service',
address=(
self.cfg.master_ip,
self.cfg.cache_config.cache_queue_port,
),
authkey=b"cache_queue_service",
is_server=True,
num_client=self.cfg.tensor_parallel_size,
client_id=-1,
local_data_parallel_size=self.cfg.parallel_config.
data_parallel_size)
local_data_parallel_size=self.cfg.parallel_config.data_parallel_size,
)
self.engine_worker_queue = EngineWorkerQueue(
address=address,
@@ -1276,5 +1244,8 @@ class LLMEngine(object):
num_client=self.cfg.tensor_parallel_size,
client_id=0,
local_data_parallel_size=self.cfg.parallel_config.data_parallel_size,
local_data_parallel_id= min(self.cfg.worker_num_per_node * self.cfg.node_rank,
self.cfg.parallel_config.data_parallel_size - 1))
local_data_parallel_id=min(
self.cfg.worker_num_per_node * self.cfg.node_rank,
self.cfg.parallel_config.data_parallel_size - 1,
),
)