
Commit d18a637

[feat] add metrics for yiyan adapter (#3219)
* [feat] add metrics for yiyan adapter
* [fix] fix metrics num_requests_waiting and num_requests_running
* [fix] fix metrics gpu_cache_usage_perc
* [refactor] change where requests_number increases
* [chore] rename xxx_block_num as xxx_gpu_block_num, and update their values accordingly
* [chore] delete useless code
1 parent 6854506 commit d18a637

7 files changed: +181, -18 lines changed

fastdeploy/cache_manager/prefix_cache_manager.py

Lines changed: 12 additions & 0 deletions
@@ -31,6 +31,7 @@
 from fastdeploy.cache_manager.cache_data import BlockNode, CacheStatus
 from fastdeploy.cache_manager.cache_metrics import CacheMetrics
 from fastdeploy.inter_communicator import EngineCacheQueue, IPCSignal
+from fastdeploy.metrics.metrics import main_process_metrics
 from fastdeploy.utils import get_logger


 logger = get_logger("prefix_cache_manager", "prefix_cache_manager.log")
@@ -106,6 +107,10 @@ def __init__(
             + f"{self.num_cpu_blocks}, bytes_per_layer_per_block {self.cache_config.bytes_per_layer_per_block}"
         )

+    @property
+    def available_gpu_resource(self):
+        return len(self.gpu_free_block_list) / self.num_gpu_blocks if self.num_gpu_blocks > 0 else 0.0
+
     def launch_cache_manager(
         self,
         cache_config,
@@ -289,6 +294,9 @@ def update_cache_config(self, cache_config):
         heapq.heapify(self.gpu_free_block_list)
         self.node_id_pool = list(range(self.num_gpu_blocks + self.num_cpu_blocks))

+        main_process_metrics.max_gpu_block_num.set(self.num_gpu_blocks)
+        main_process_metrics.available_gpu_resource.set(1.0)
+
     def _enable_cpu_cache(self):
         """
         _enable_cpu_cache function used to enable cpu cache.
@@ -324,6 +332,8 @@ def allocate_gpu_blocks(self, num_blocks):
         logger.info(
             f"allocate_gpu_blocks: {allocated_block_ids}, len(self.gpu_free_block_list) {len(self.gpu_free_block_list)}"
         )
+        main_process_metrics.free_gpu_block_num.set(len(self.gpu_free_block_list))
+        main_process_metrics.available_gpu_resource.set(self.available_gpu_resource)
         return allocated_block_ids

     def recycle_gpu_blocks(self, gpu_block_ids):
@@ -338,6 +348,8 @@ def recycle_gpu_blocks(self, gpu_block_ids):
             heapq.heappush(self.gpu_free_block_list, gpu_block_id)
         else:
             heapq.heappush(self.gpu_free_block_list, gpu_block_ids)
+        main_process_metrics.free_gpu_block_num.set(len(self.gpu_free_block_list))
+        main_process_metrics.available_gpu_resource.set(self.available_gpu_resource)

     def allocate_cpu_blocks(self, num_blocks):
         """

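The hunks above wire gauge updates into every point where the GPU block pool changes size. As a minimal, self-contained sketch of that bookkeeping pattern (a toy BlockPool class with shortened metric names, not FastDeploy's actual PrefixCacheManager), the following keeps a free-block gauge and a free-ratio gauge in sync on every allocate/recycle, deriving the ratio the same way available_gpu_resource is derived from gpu_free_block_list and num_gpu_blocks:

import heapq
from prometheus_client import Gauge

free_gpu_block_num = Gauge("free_gpu_block_num", "Number of free blocks in the pool")
available_gpu_resource = Gauge("available_gpu_resource", "Free block ratio, free / max")

class BlockPool:
    """Toy stand-in for the GPU block pool; not the real PrefixCacheManager."""

    def __init__(self, num_gpu_blocks):
        self.num_gpu_blocks = num_gpu_blocks
        self.gpu_free_block_list = list(range(num_gpu_blocks))
        heapq.heapify(self.gpu_free_block_list)
        self._report()

    @property
    def available_gpu_resource(self):
        # Same guard as the commit: avoid division by zero for an empty pool.
        return len(self.gpu_free_block_list) / self.num_gpu_blocks if self.num_gpu_blocks > 0 else 0.0

    def allocate(self, n):
        blocks = [heapq.heappop(self.gpu_free_block_list) for _ in range(n)]
        self._report()
        return blocks

    def recycle(self, blocks):
        for block_id in blocks:
            heapq.heappush(self.gpu_free_block_list, block_id)
        self._report()

    def _report(self):
        # Re-publish both gauges after every mutation of the free list.
        free_gpu_block_num.set(len(self.gpu_free_block_list))
        available_gpu_resource.set(self.available_gpu_resource)

pool = BlockPool(num_gpu_blocks=8)
pool.recycle(pool.allocate(3))  # gauges end back at 8 free blocks, ratio 1.0

Re-reporting after every mutation keeps Prometheus scrapes consistent without a separate polling loop.
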
fastdeploy/engine/engine.py

Lines changed: 3 additions & 0 deletions
@@ -439,6 +439,8 @@ def _fetch_request():
             get_request_pool.submit(_fetch_request)
             # 2. Schedule requests
             tasks = self.resource_manager.schedule()
+            main_process_metrics.num_requests_waiting.dec(len(tasks))
+            main_process_metrics.num_requests_running.inc(len(tasks))
             # 3. Send to engine
             if tasks:
                 self.resource_manager.get_real_bsz()
@@ -476,6 +478,7 @@ def _insert_zmq_task_to_scheduler(self):
                 request = Request.from_dict(data)
                 start_span("ENQUEUE_ZMQ", data, trace.SpanKind.PRODUCER)

+                main_process_metrics.requests_number.inc()
                 llm_logger.debug(f"Receive request: {request}")

                 err_msg = None
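
As a rough illustration of the request accounting added in engine.py (assumed helper names, not the engine's real scheduler loop; the enqueue-side increment of the waiting gauge is an assumption, since that part of the engine is not shown in this diff), the sketch below bumps a monotonic requests_number counter at ingestion and moves the waiting/running gauges together when a batch leaves the queue:

from collections import deque
from prometheus_client import Counter, Gauge

requests_number = Counter("requests_number", "Total number of requests received")
num_requests_waiting = Gauge("num_requests_waiting", "Requests waiting to be scheduled")
num_requests_running = Gauge("num_requests_running", "Requests currently running")

waiting = deque()  # queue of request ids

def enqueue(request_id):
    requests_number.inc()        # counted once, at ingestion time
    num_requests_waiting.inc()   # assumed: waiting gauge rises when a request is queued
    waiting.append(request_id)

def schedule(max_batch):
    tasks = [waiting.popleft() for _ in range(min(max_batch, len(waiting)))]
    num_requests_waiting.dec(len(tasks))   # mirrors the engine.py change above
    num_requests_running.inc(len(tasks))
    return tasks

for rid in ("req-1", "req-2", "req-3"):
    enqueue(rid)
print(schedule(max_batch=2))  # ['req-1', 'req-2']; waiting gauge is now 1, running gauge 2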

fastdeploy/engine/resource_manager.py

Lines changed: 34 additions & 17 deletions
@@ -57,14 +57,15 @@ def __init__(
         self.logger = llm_logger
         self.cfg = config.cache_config
         self.max_num_seqs = max_num_seqs
-        self.stop_flags = [True] * max_num_seqs
+        self.stop_flags = [True] * max_num_seqs  # flag set to true if the slot has not been taken
         self.enable_prefix_cache = config.cache_config.enable_prefix_caching
         self.cache_manager = PrefixCacheManager(config, tensor_parallel_size, splitwise_role, local_data_parallel_id)
-        self.tasks_list = [None] * max_num_seqs
+        self.tasks_list = [None] * max_num_seqs  # task slots
         self.req_dict = dict()
         # current batch status of the engine
         self.real_bsz = 0
         self.logger.info(f"{self.info()}")
+        main_process_metrics.max_batch_size.set(max_num_seqs)

     def reset_cache_config(self, cfg):
         """
@@ -228,72 +229,76 @@ def allocate_resources_for_new_tasks(self, tasks):
         Returns:
             list: processed task list
         """
-
-        allocated_position = 0
-        processing_task_index = 0
+        llm_logger.debug(f"Allocating resources for a batch of new tasks: {tasks}")
+        allocated_position = 0  # number of tasks that have been allocated, also the position in request slots
+        processing_task_index = 0  # current task
         processed_tasks = list()
-        while allocated_position < self.max_num_seqs:
-            if processing_task_index >= len(tasks):
+        while allocated_position < self.max_num_seqs:  # loop until all tasks are allocated resources for
+            if processing_task_index >= len(tasks):  # if all taskes have been tried, don't give a second chance
                 break

             can_insert = False
             while allocated_position + 1 <= self.max_num_seqs:
                 if sum(self.stop_flags[allocated_position : allocated_position + 1]) == 1:
-                    can_insert = True
+                    can_insert = True  # if there is a empty slot, try to allocate resources for current task
                    break
                 allocated_position += 1
             if can_insert:
                 if self.stop_flags[allocated_position]:

-                    task = tasks[processing_task_index]
+                    task = tasks[processing_task_index]  # retrieve current task

                     if task.get("seed") is None:
                         task.set("seed", random.randint(0, 9223372036854775807))
                     task.idx = allocated_position

-                    if self.enable_prefix_cache:
+                    if self.enable_prefix_cache:  # if prefix caching is enabled
+                        # 1. request for enough blocks for current task
                         cache_prepare_time = time.time()
                         common_block_ids, unique_block_ids, hit_info = self.cache_manager.request_block_ids(
                             task, self.cfg.block_size, self.cfg.dec_token_num
                         )
                         if unique_block_ids is None:
                             self.logger.warning("req_id: {0} not enough blocks available".format(task["req_id"]))
                             return
-
+                        # 2. record cache hit information, and return the number of tokens already in cache
                         cached_len = self._record_request_cache_info(
                             task, common_block_ids, unique_block_ids, hit_info
                         )
                         task.cache_prepare_time = time.time() - cache_prepare_time
-
+                        # 3. if prefill/decode disaggregation is enabled
                         if task.disaggregate_info is not None:
                             if task.disaggregate_info["role"] == "prefill":
+                                # record the slot position for current task, indexed by request id
                                 self.req_dict[task.request_id] = allocated_position
                                 task.disaggregate_info["block_tables"] = task.block_tables
                                 self._delete_cached_data(task, cached_len)
                             elif task.disaggregate_info["role"] == "decode":
                                 self.req_dict[task.request_id] = allocated_position
                                 task.disaggregate_info["block_tables"] = task.need_block_tables
                         else:
+                            # remove cached tokens from prompt token ids to avoid kv recomputation
                             self._delete_cached_data(task, cached_len)

-                    else:
+                    else:  # if prefix caching is disabled
+                        # 1. directly allocate empty block from the cache, if there is any
                         block_tables = self._get_block_tables(task.prompt_token_ids_len)
                         if not block_tables:
                             llm_logger.error(f"req_id: {task.request_id} block_tables is empty")
-                            continue
+                            continue  # retry
                         else:
                             task.block_tables = block_tables
                             task.need_block_tables = task.block_tables
-
+                        # 2. if prefill/decode disaggregation is enabled
                         if task.disaggregate_info is not None:
                             task.disaggregate_info["block_tables"] = block_tables
                             if task.disaggregate_info["role"] == "prefill":
                                 self.req_dict[task.request_id] = allocated_position
                             elif task.disaggregate_info["role"] == "decode":
                                 self.req_dict[task.request_id] = allocated_position

-                    processed_tasks.append(task)
-                    self.stop_flags[allocated_position] = False
+                    processed_tasks.append(task)  # add current task
+                    self.stop_flags[allocated_position] = False  # mark the slot as occupied
                     task.inference_start_time = time.time()
                     task.inference_time_cost = -1.0
                     task.tokens_all_num = 0
@@ -307,11 +312,18 @@ def allocate_resources_for_new_tasks(self, tasks):
             processing_task_index += 1

         # batch size when the statistical engine is inferring
+        # determine batch size by index of the first slot that is not occupied
         for i in range(self.max_num_seqs - 1, -1, -1):
             if not self.stop_flags[i]:
                 self.real_bsz = i + 1
                 break

+        # record batch size here
+        task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
+        main_process_metrics.available_gpu_block_num.set(self.total_block_number() - task_used_block_num)
+        main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
+        main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
+
         self.logger.info(
             f"Number of allocated requests: {len(tasks)}, number of " f"running requests in worker: {self.real_bsz}"
         )
@@ -343,6 +355,11 @@ def _record_request_cache_info(self, task, common_block_ids, unique_block_ids, h
         task.cpu_cache_token_num = hit_info["cpu_cache_blocks"] * self.cfg.block_size
         task.cache_info = (cache_block_num, no_cache_block_num)

+        # Report the number of cached tokens to Prometheus metrics
+        main_process_metrics.prefix_cache_token_num.inc(task.num_cached_tokens)
+        main_process_metrics.prefix_gpu_cache_token_num.inc(task.gpu_cache_token_num)
+        main_process_metrics.prefix_cpu_cache_token_num.inc(task.cpu_cache_token_num)
+
         cached_len = len(common_block_ids) * self.cfg.block_size
         task.block_tables = common_block_ids + unique_block_ids
         task.need_block_tables = unique_block_ids
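
The gauge math added at the end of allocate_resources_for_new_tasks can be read on its own. The sketch below is a standalone approximation (hypothetical Task and report helpers; it assumes available_batch() counts free slots): used blocks are summed over the task slots, available blocks are the total minus that sum, and batch size is the number of occupied slots.

from __future__ import annotations

from dataclasses import dataclass, field
from prometheus_client import Gauge

available_gpu_block_num = Gauge("available_gpu_block_num", "Blocks not referenced by any task slot")
batch_size = Gauge("batch_size", "Occupied task slots")
gpu_cache_usage_perc = Gauge("gpu_cache_usage_perc", "Fraction of GPU blocks in use")

@dataclass
class Task:
    block_tables: list[int] = field(default_factory=list)

def report(tasks_list: list[Task | None], stop_flags: list[bool], total_blocks: int) -> None:
    # Blocks held by occupied slots; empty slots contribute 0, as in the diff.
    task_used_block_num = sum(len(t.block_tables) if t else 0 for t in tasks_list)
    available_gpu_block_num.set(total_blocks - task_used_block_num)
    # stop_flags[i] is True for a free slot, so occupied slots = total - free.
    batch_size.set(len(stop_flags) - sum(stop_flags))
    gpu_cache_usage_perc.set(task_used_block_num / total_blocks if total_blocks else 0.0)

slots = [Task([0, 1, 2]), None, Task([3])]
report(slots, stop_flags=[False, True, False], total_blocks=16)  # 12 available, batch size 2, 25% used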

fastdeploy/engine/sched/resource_manager_v1.py

Lines changed: 18 additions & 1 deletion
@@ -27,6 +27,7 @@

 from fastdeploy.engine.request import Request, RequestStatus, RequestType
 from fastdeploy.engine.resource_manager import ResourceManager
+from fastdeploy.metrics.metrics import main_process_metrics
 from fastdeploy.utils import llm_logger


@@ -75,6 +76,7 @@ def __init__(self, max_num_seqs, config, tensor_parallel_size, splitwise_role, l
         self.running: list[Request] = []
         self.finish_execution_pool = ThreadPoolExecutor(max_workers=1)
         self.lock = threading.Lock()
+        main_process_metrics.max_batch_size.set(max_num_seqs)

     def allocated_slots(self, request: Request):
         return len(request.block_tables) * self.config.cache_config.block_size
@@ -98,6 +100,9 @@ def _prepare_preempt_task(self, request):
         return ScheduledPreemptTask(idx=request.idx, request_id=request.request_id)

     def _trigger_preempt(self, request, num_new_blocks, preempted_reqs, scheduled_reqs):
+        """
+        If the request cannot be scheduled, preempt the running request one by one until it can be scheduled. Last in, first out.
+        """
         can_schedule = True
         while True:
             if not self.cache_manager.can_allocate_gpu_blocks(num_new_blocks):
@@ -201,6 +206,9 @@ def exist_prefill(self, scheduled_reqs):
         return False

     def schedule(self):
+        """
+        Try to pull a batch of requests from the waiting queue and schedule them.
+        """
         with self.lock:
             scheduled_reqs: list[Request] = []
             preempted_reqs: list[Request] = []
@@ -262,7 +270,7 @@ def schedule(self):
                         request.block_tables.extend(self.cache_manager.allocate_gpu_blocks(num_new_block))
                         # Prepare prefill task
                         scheduled_reqs.append(self._prepare_prefill_task(request, num_new_tokens))
-                    else:
+                    else:  # Not enough blocks to allocate, trigger preemption
                         can_schedule = self._trigger_preempt(request, num_new_block, preempted_reqs, scheduled_reqs)
                         if not can_schedule:
                             break
@@ -328,6 +336,10 @@ def schedule(self):
                 else:
                     llm_logger.error("Unknown request status type")
             if scheduled_reqs:
+                task_used_block_num = sum([len(task.block_tables) if task else 0 for task in self.tasks_list])
+                main_process_metrics.available_gpu_block_num.set(self.total_block_number() - task_used_block_num)
+                main_process_metrics.batch_size.set(self.max_num_seqs - self.available_batch())
+                main_process_metrics.gpu_cache_usage_perc.set(self.get_gpu_cache_usage_perc())
                 llm_logger.debug(f"schedued_reqs: {scheduled_reqs}")
             return scheduled_reqs

@@ -369,6 +381,11 @@ def get_prefix_cached_blocks(self, request: Request):
             request.block_tables = common_block_ids
             request.skip_allocate = False

+            # Report the number of cached tokens to Prometheus metrics
+            main_process_metrics.prefix_cache_token_num.inc(matched_token_num)
+            main_process_metrics.prefix_gpu_cache_token_num.inc(request.gpu_cache_token_num)
+            main_process_metrics.prefix_cpu_cache_token_num.inc(request.cpu_cache_token_num)
+
             if matched_token_num == request.prompt_token_ids_len:
                 request.num_computed_tokens = matched_token_num - 1
                 request.skip_allocate = True
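
A hedged sketch of the prefix-cache reporting added in get_prefix_cached_blocks, with an invented RequestHit container standing in for the real Request fields: matched tokens are accumulated once in total and once per storage tier, and Prometheus Counters fit because cache-hit totals only ever grow.

from dataclasses import dataclass
from prometheus_client import Counter

prefix_cache_token_num = Counter("prefix_cache_token_num", "Total number of cached tokens")
prefix_gpu_cache_token_num = Counter("prefix_gpu_cache_token_num", "Cached tokens served from GPU")
prefix_cpu_cache_token_num = Counter("prefix_cpu_cache_token_num", "Cached tokens served from CPU")

@dataclass
class RequestHit:
    """Invented container; the real code reads these fields off the Request/task object."""
    matched_token_num: int
    gpu_cache_token_num: int
    cpu_cache_token_num: int

def report_prefix_hit(hit):
    prefix_cache_token_num.inc(hit.matched_token_num)
    prefix_gpu_cache_token_num.inc(hit.gpu_cache_token_num)
    prefix_cpu_cache_token_num.inc(hit.cpu_cache_token_num)

# e.g. 512 matched tokens: 384 already resident on GPU, 128 pulled up from the CPU cache
report_prefix_hit(RequestHit(matched_token_num=512, gpu_cache_token_num=384, cpu_cache_token_num=128))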

fastdeploy/metrics/metrics.py

Lines changed: 101 additions & 0 deletions
@@ -154,6 +154,22 @@ class MetricsManager:
     spec_decode_num_emitted_tokens_total: "Counter"
     spec_decode_draft_single_head_acceptance_rate: "list[Gauge]"

+    # for YIYAN Adapter
+    prefix_cache_token_num: "Gauge"
+    prefix_gpu_cache_token_num: "Gauge"
+    prefix_cpu_cache_token_num: "Gauge"
+    prefix_ssd_cache_token_num: "Gauge"
+    batch_size: "Gauge"
+    max_batch_size: "Gauge"
+    available_gpu_block_num: "Gauge"
+    free_gpu_block_num: "Gauge"
+    max_gpu_block_num: "Gauge"
+    available_gpu_resource: "Gauge"
+    requests_number: "Counter"
+    send_cache_failed_num: "Counter"
+    first_token_latency: "Gauge"
+    infer_latency: "Gauge"
+
     # 定义所有指标配置
     METRICS = {
         "num_requests_running": {
@@ -258,6 +274,91 @@ class MetricsManager:
             "description": "Total number of successfully processed requests",
             "kwargs": {},
         },
+        # for YIYAN Adapter
+        "prefix_cache_token_num": {
+            "type": Counter,
+            "name": "fastdeploy:prefix_cache_token_num",
+            "description": "Total number of cached tokens",
+            "kwargs": {},
+        },
+        "prefix_gpu_cache_token_num": {
+            "type": Counter,
+            "name": "fastdeploy:prefix_gpu_cache_token_num",
+            "description": "Total number of cached tokens on GPU",
+            "kwargs": {},
+        },
+        "prefix_cpu_cache_token_num": {
+            "type": Counter,
+            "name": "fastdeploy:prefix_cpu_cache_token_num",
+            "description": "Total number of cached tokens on CPU",
+            "kwargs": {},
+        },
+        "prefix_ssd_cache_token_num": {
+            "type": Counter,
+            "name": "fastdeploy:prefix_ssd_cache_token_num",
+            "description": "Total number of cached tokens on SSD",
+            "kwargs": {},
+        },
+        "batch_size": {
+            "type": Gauge,
+            "name": "fastdeploy:batch_size",
+            "description": "Real batch size during inference",
+            "kwargs": {},
+        },
+        "max_batch_size": {
+            "type": Gauge,
+            "name": "fastdeploy:max_batch_size",
+            "description": "Maximum batch size determined when service started",
+            "kwargs": {},
+        },
+        "available_gpu_block_num": {
+            "type": Gauge,
+            "name": "fastdeploy:available_gpu_block_num",
+            "description": "Number of available gpu blocks in cache, including prefix caching blocks that are not officially released",
+            "kwargs": {},
+        },
+        "free_gpu_block_num": {
+            "type": Gauge,
+            "name": "fastdeploy:free_gpu_block_num",
+            "description": "Number of free blocks in cache",
+            "kwargs": {},
+        },
+        "max_gpu_block_num": {
+            "type": Gauge,
+            "name": "fastdeploy:max_gpu_block_num",
+            "description": "Number of total blocks determined when service started",
+            "kwargs": {},
+        },
+        "available_gpu_resource": {
+            "type": Gauge,
+            "name": "fastdeploy:available_gpu_resource",
+            "description": "Available blocks percentage, i.e. available_gpu_block_num / max_gpu_block_num",
+            "kwargs": {},
+        },
+        "requests_number": {
+            "type": Counter,
+            "name": "fastdeploy:requests_number",
+            "description": "Total number of requests received",
+            "kwargs": {},
+        },
+        "send_cache_failed_num": {
+            "type": Counter,
+            "name": "fastdeploy:send_cache_failed_num",
+            "description": "Total number of failures of sending cache",
+            "kwargs": {},
+        },
+        "first_token_latency": {
+            "type": Gauge,
+            "name": "fastdeploy:first_token_latency",
+            "description": "Latest time to first token in seconds",
+            "kwargs": {},
+        },
+        "infer_latency": {
+            "type": Gauge,
+            "name": "fastdeploy:infer_latency",
+            "description": "Latest time to generate one token in seconds",
+            "kwargs": {},
+        },
     }
     SPECULATIVE_METRICS = {}

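The METRICS table above is declarative. One plausible way to consume such a table (an assumption about the wiring, not FastDeploy's actual MetricsManager internals) is to instantiate each entry with prometheus_client and attach it as an attribute, which is the shape that call sites like main_process_metrics.requests_number.inc() in the other files rely on; only two entries from the diff are reproduced to keep the sketch short.

from prometheus_client import Counter, Gauge, generate_latest

METRICS = {
    "requests_number": {
        "type": Counter,
        "name": "fastdeploy:requests_number",
        "description": "Total number of requests received",
        "kwargs": {},
    },
    "available_gpu_resource": {
        "type": Gauge,
        "name": "fastdeploy:available_gpu_resource",
        "description": "Available blocks percentage, i.e. available_gpu_block_num / max_gpu_block_num",
        "kwargs": {},
    },
}

class MetricsManager:
    """Hypothetical consumer of a METRICS-style config table."""

    def create_metrics(self):
        # Instantiate each configured metric and attach it as an attribute,
        # so call sites can write e.g. manager.requests_number.inc().
        for attr, spec in METRICS.items():
            setattr(self, attr, spec["type"](spec["name"], spec["description"], **spec["kwargs"]))

manager = MetricsManager()
manager.create_metrics()
manager.requests_number.inc()
manager.available_gpu_resource.set(1.0)
print(generate_latest().decode())  # exposition-format dump of the registered metrics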